diff options
author | Krzysztof Kozlowski <k.kozlowski@samsung.com> | 2016-08-03 16:46:00 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-04 08:50:07 -0400 |
commit | 00085f1efa387a8ce100e3734920f7639c80caa3 (patch) | |
tree | 41ff3d6e6884918b4fc4f1ae96a284098167c5b0 /arch | |
parent | 1605d2715ad2e67ddd0485a26e05ed670a4285ca (diff) |
dma-mapping: use unsigned long for dma_attrs
The dma-mapping core and the implementations do not change the DMA
attributes passed by pointer. Thus the pointer can point to const data.
However the attributes do not have to be a bitfield. Instead unsigned
long will do fine:
1. This is just simpler. Both in terms of reading the code and setting
attributes. Instead of initializing local attributes on the stack
and passing pointer to it to dma_set_attr(), just set the bits.
2. It brings safety and enables const-correctness checking, because the
attributes are passed by value.
Semantic patches for this change (at least most of them):
virtual patch
virtual context
@r@
identifier f, attrs;
@@
f(...,
- struct dma_attrs *attrs
+ unsigned long attrs
, ...)
{
...
}
@@
identifier r.f;
@@
f(...,
- NULL
+ 0
)
and
// Options: --all-includes
virtual patch
virtual context
@r@
identifier f, attrs;
type t;
@@
t f(..., struct dma_attrs *attrs);
@@
identifier r.f;
@@
f(...,
- NULL
+ 0
)
Link: http://lkml.kernel.org/r/1468399300-5399-2-git-send-email-k.kozlowski@samsung.com
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Acked-by: Mark Salter <msalter@redhat.com> [c6x]
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com> [cris]
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> [drm]
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Fabien Dessenne <fabien.dessenne@st.com> [bdisp]
Reviewed-by: Marek Szyprowski <m.szyprowski@samsung.com> [vb2-core]
Acked-by: David Vrabel <david.vrabel@citrix.com> [xen]
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> [xen swiotlb]
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Richard Kuo <rkuo@codeaurora.org> [hexagon]
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> [s390]
Acked-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no> [avr32]
Acked-by: Vineet Gupta <vgupta@synopsys.com> [arc]
Acked-by: Robin Murphy <robin.murphy@arm.com> [arm64 and dma-iommu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
74 files changed, 437 insertions, 463 deletions
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 3c3451f58ff4..c63b6ac19ee5 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _ALPHA_DMA_MAPPING_H | 1 | #ifndef _ALPHA_DMA_MAPPING_H |
2 | #define _ALPHA_DMA_MAPPING_H | 2 | #define _ALPHA_DMA_MAPPING_H |
3 | 3 | ||
4 | #include <linux/dma-attrs.h> | ||
5 | |||
6 | extern struct dma_map_ops *dma_ops; | 4 | extern struct dma_map_ops *dma_ops; |
7 | 5 | ||
8 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) | 6 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index 8e735b5e56bd..bb152e21e5ae 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c | |||
@@ -109,7 +109,7 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn, | |||
109 | 109 | ||
110 | static void *alpha_noop_alloc_coherent(struct device *dev, size_t size, | 110 | static void *alpha_noop_alloc_coherent(struct device *dev, size_t size, |
111 | dma_addr_t *dma_handle, gfp_t gfp, | 111 | dma_addr_t *dma_handle, gfp_t gfp, |
112 | struct dma_attrs *attrs) | 112 | unsigned long attrs) |
113 | { | 113 | { |
114 | void *ret; | 114 | void *ret; |
115 | 115 | ||
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 8969bf2dfe3a..451fc9cdd323 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c | |||
@@ -349,7 +349,7 @@ static struct pci_dev *alpha_gendev_to_pci(struct device *dev) | |||
349 | static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, | 349 | static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, |
350 | unsigned long offset, size_t size, | 350 | unsigned long offset, size_t size, |
351 | enum dma_data_direction dir, | 351 | enum dma_data_direction dir, |
352 | struct dma_attrs *attrs) | 352 | unsigned long attrs) |
353 | { | 353 | { |
354 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 354 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
355 | int dac_allowed; | 355 | int dac_allowed; |
@@ -369,7 +369,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, | |||
369 | 369 | ||
370 | static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, | 370 | static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, |
371 | size_t size, enum dma_data_direction dir, | 371 | size_t size, enum dma_data_direction dir, |
372 | struct dma_attrs *attrs) | 372 | unsigned long attrs) |
373 | { | 373 | { |
374 | unsigned long flags; | 374 | unsigned long flags; |
375 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 375 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
@@ -433,7 +433,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
433 | 433 | ||
434 | static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, | 434 | static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, |
435 | dma_addr_t *dma_addrp, gfp_t gfp, | 435 | dma_addr_t *dma_addrp, gfp_t gfp, |
436 | struct dma_attrs *attrs) | 436 | unsigned long attrs) |
437 | { | 437 | { |
438 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 438 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
439 | void *cpu_addr; | 439 | void *cpu_addr; |
@@ -478,7 +478,7 @@ try_again: | |||
478 | 478 | ||
479 | static void alpha_pci_free_coherent(struct device *dev, size_t size, | 479 | static void alpha_pci_free_coherent(struct device *dev, size_t size, |
480 | void *cpu_addr, dma_addr_t dma_addr, | 480 | void *cpu_addr, dma_addr_t dma_addr, |
481 | struct dma_attrs *attrs) | 481 | unsigned long attrs) |
482 | { | 482 | { |
483 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 483 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
484 | pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); | 484 | pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); |
@@ -651,7 +651,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, | |||
651 | 651 | ||
652 | static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, | 652 | static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, |
653 | int nents, enum dma_data_direction dir, | 653 | int nents, enum dma_data_direction dir, |
654 | struct dma_attrs *attrs) | 654 | unsigned long attrs) |
655 | { | 655 | { |
656 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 656 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
657 | struct scatterlist *start, *end, *out; | 657 | struct scatterlist *start, *end, *out; |
@@ -729,7 +729,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, | |||
729 | 729 | ||
730 | static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, | 730 | static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, |
731 | int nents, enum dma_data_direction dir, | 731 | int nents, enum dma_data_direction dir, |
732 | struct dma_attrs *attrs) | 732 | unsigned long attrs) |
733 | { | 733 | { |
734 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 734 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
735 | unsigned long flags; | 735 | unsigned long flags; |
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index ab74b5d9186c..20afc65e22dc 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c | |||
@@ -22,7 +22,7 @@ | |||
22 | 22 | ||
23 | 23 | ||
24 | static void *arc_dma_alloc(struct device *dev, size_t size, | 24 | static void *arc_dma_alloc(struct device *dev, size_t size, |
25 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 25 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
26 | { | 26 | { |
27 | unsigned long order = get_order(size); | 27 | unsigned long order = get_order(size); |
28 | struct page *page; | 28 | struct page *page; |
@@ -46,7 +46,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size, | |||
46 | * (vs. always going to memory - thus are faster) | 46 | * (vs. always going to memory - thus are faster) |
47 | */ | 47 | */ |
48 | if ((is_isa_arcv2() && ioc_exists) || | 48 | if ((is_isa_arcv2() && ioc_exists) || |
49 | dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) | 49 | (attrs & DMA_ATTR_NON_CONSISTENT)) |
50 | need_coh = 0; | 50 | need_coh = 0; |
51 | 51 | ||
52 | /* | 52 | /* |
@@ -90,13 +90,13 @@ static void *arc_dma_alloc(struct device *dev, size_t size, | |||
90 | } | 90 | } |
91 | 91 | ||
92 | static void arc_dma_free(struct device *dev, size_t size, void *vaddr, | 92 | static void arc_dma_free(struct device *dev, size_t size, void *vaddr, |
93 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 93 | dma_addr_t dma_handle, unsigned long attrs) |
94 | { | 94 | { |
95 | phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle); | 95 | phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle); |
96 | struct page *page = virt_to_page(paddr); | 96 | struct page *page = virt_to_page(paddr); |
97 | int is_non_coh = 1; | 97 | int is_non_coh = 1; |
98 | 98 | ||
99 | is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) || | 99 | is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) || |
100 | (is_isa_arcv2() && ioc_exists); | 100 | (is_isa_arcv2() && ioc_exists); |
101 | 101 | ||
102 | if (PageHighMem(page) || !is_non_coh) | 102 | if (PageHighMem(page) || !is_non_coh) |
@@ -130,7 +130,7 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size, | |||
130 | 130 | ||
131 | static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, | 131 | static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, |
132 | unsigned long offset, size_t size, enum dma_data_direction dir, | 132 | unsigned long offset, size_t size, enum dma_data_direction dir, |
133 | struct dma_attrs *attrs) | 133 | unsigned long attrs) |
134 | { | 134 | { |
135 | phys_addr_t paddr = page_to_phys(page) + offset; | 135 | phys_addr_t paddr = page_to_phys(page) + offset; |
136 | _dma_cache_sync(paddr, size, dir); | 136 | _dma_cache_sync(paddr, size, dir); |
@@ -138,7 +138,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, | |||
138 | } | 138 | } |
139 | 139 | ||
140 | static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, | 140 | static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, |
141 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 141 | int nents, enum dma_data_direction dir, unsigned long attrs) |
142 | { | 142 | { |
143 | struct scatterlist *s; | 143 | struct scatterlist *s; |
144 | int i; | 144 | int i; |
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 1143c4d5c567..301281645d08 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c | |||
@@ -310,7 +310,7 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf, | |||
310 | */ | 310 | */ |
311 | static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, | 311 | static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, |
312 | unsigned long offset, size_t size, enum dma_data_direction dir, | 312 | unsigned long offset, size_t size, enum dma_data_direction dir, |
313 | struct dma_attrs *attrs) | 313 | unsigned long attrs) |
314 | { | 314 | { |
315 | dma_addr_t dma_addr; | 315 | dma_addr_t dma_addr; |
316 | int ret; | 316 | int ret; |
@@ -344,7 +344,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, | |||
344 | * should be) | 344 | * should be) |
345 | */ | 345 | */ |
346 | static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | 346 | static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, |
347 | enum dma_data_direction dir, struct dma_attrs *attrs) | 347 | enum dma_data_direction dir, unsigned long attrs) |
348 | { | 348 | { |
349 | struct safe_buffer *buf; | 349 | struct safe_buffer *buf; |
350 | 350 | ||
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index a83570f10124..d009f7911ffc 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h | |||
@@ -5,7 +5,6 @@ | |||
5 | 5 | ||
6 | #include <linux/mm_types.h> | 6 | #include <linux/mm_types.h> |
7 | #include <linux/scatterlist.h> | 7 | #include <linux/scatterlist.h> |
8 | #include <linux/dma-attrs.h> | ||
9 | #include <linux/dma-debug.h> | 8 | #include <linux/dma-debug.h> |
10 | 9 | ||
11 | #include <asm/memory.h> | 10 | #include <asm/memory.h> |
@@ -174,7 +173,7 @@ static inline void dma_mark_clean(void *addr, size_t size) { } | |||
174 | * to be the device-viewed address. | 173 | * to be the device-viewed address. |
175 | */ | 174 | */ |
176 | extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 175 | extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
177 | gfp_t gfp, struct dma_attrs *attrs); | 176 | gfp_t gfp, unsigned long attrs); |
178 | 177 | ||
179 | /** | 178 | /** |
180 | * arm_dma_free - free memory allocated by arm_dma_alloc | 179 | * arm_dma_free - free memory allocated by arm_dma_alloc |
@@ -191,7 +190,7 @@ extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
191 | * during and after this call executing are illegal. | 190 | * during and after this call executing are illegal. |
192 | */ | 191 | */ |
193 | extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | 192 | extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, |
194 | dma_addr_t handle, struct dma_attrs *attrs); | 193 | dma_addr_t handle, unsigned long attrs); |
195 | 194 | ||
196 | /** | 195 | /** |
197 | * arm_dma_mmap - map a coherent DMA allocation into user space | 196 | * arm_dma_mmap - map a coherent DMA allocation into user space |
@@ -208,7 +207,7 @@ extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | |||
208 | */ | 207 | */ |
209 | extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 208 | extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
210 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 209 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
211 | struct dma_attrs *attrs); | 210 | unsigned long attrs); |
212 | 211 | ||
213 | /* | 212 | /* |
214 | * This can be called during early boot to increase the size of the atomic | 213 | * This can be called during early boot to increase the size of the atomic |
@@ -262,16 +261,16 @@ extern void dmabounce_unregister_dev(struct device *); | |||
262 | * The scatter list versions of the above methods. | 261 | * The scatter list versions of the above methods. |
263 | */ | 262 | */ |
264 | extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, | 263 | extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, |
265 | enum dma_data_direction, struct dma_attrs *attrs); | 264 | enum dma_data_direction, unsigned long attrs); |
266 | extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, | 265 | extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, |
267 | enum dma_data_direction, struct dma_attrs *attrs); | 266 | enum dma_data_direction, unsigned long attrs); |
268 | extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, | 267 | extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, |
269 | enum dma_data_direction); | 268 | enum dma_data_direction); |
270 | extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, | 269 | extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, |
271 | enum dma_data_direction); | 270 | enum dma_data_direction); |
272 | extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, | 271 | extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, |
273 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 272 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
274 | struct dma_attrs *attrs); | 273 | unsigned long attrs); |
275 | 274 | ||
276 | #endif /* __KERNEL__ */ | 275 | #endif /* __KERNEL__ */ |
277 | #endif | 276 | #endif |
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h index 9408a994cc91..95ce6ac3a971 100644 --- a/arch/arm/include/asm/xen/page-coherent.h +++ b/arch/arm/include/asm/xen/page-coherent.h | |||
@@ -2,15 +2,14 @@ | |||
2 | #define _ASM_ARM_XEN_PAGE_COHERENT_H | 2 | #define _ASM_ARM_XEN_PAGE_COHERENT_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | #include <linux/dma-attrs.h> | ||
6 | #include <linux/dma-mapping.h> | 5 | #include <linux/dma-mapping.h> |
7 | 6 | ||
8 | void __xen_dma_map_page(struct device *hwdev, struct page *page, | 7 | void __xen_dma_map_page(struct device *hwdev, struct page *page, |
9 | dma_addr_t dev_addr, unsigned long offset, size_t size, | 8 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
10 | enum dma_data_direction dir, struct dma_attrs *attrs); | 9 | enum dma_data_direction dir, unsigned long attrs); |
11 | void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 10 | void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
12 | size_t size, enum dma_data_direction dir, | 11 | size_t size, enum dma_data_direction dir, |
13 | struct dma_attrs *attrs); | 12 | unsigned long attrs); |
14 | void __xen_dma_sync_single_for_cpu(struct device *hwdev, | 13 | void __xen_dma_sync_single_for_cpu(struct device *hwdev, |
15 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | 14 | dma_addr_t handle, size_t size, enum dma_data_direction dir); |
16 | 15 | ||
@@ -18,22 +17,20 @@ void __xen_dma_sync_single_for_device(struct device *hwdev, | |||
18 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | 17 | dma_addr_t handle, size_t size, enum dma_data_direction dir); |
19 | 18 | ||
20 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | 19 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, |
21 | dma_addr_t *dma_handle, gfp_t flags, | 20 | dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) |
22 | struct dma_attrs *attrs) | ||
23 | { | 21 | { |
24 | return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); | 22 | return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); |
25 | } | 23 | } |
26 | 24 | ||
27 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, | 25 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, |
28 | void *cpu_addr, dma_addr_t dma_handle, | 26 | void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) |
29 | struct dma_attrs *attrs) | ||
30 | { | 27 | { |
31 | __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); | 28 | __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); |
32 | } | 29 | } |
33 | 30 | ||
34 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | 31 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, |
35 | dma_addr_t dev_addr, unsigned long offset, size_t size, | 32 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
36 | enum dma_data_direction dir, struct dma_attrs *attrs) | 33 | enum dma_data_direction dir, unsigned long attrs) |
37 | { | 34 | { |
38 | unsigned long page_pfn = page_to_xen_pfn(page); | 35 | unsigned long page_pfn = page_to_xen_pfn(page); |
39 | unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); | 36 | unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); |
@@ -58,8 +55,7 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | |||
58 | } | 55 | } |
59 | 56 | ||
60 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 57 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
61 | size_t size, enum dma_data_direction dir, | 58 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
62 | struct dma_attrs *attrs) | ||
63 | { | 59 | { |
64 | unsigned long pfn = PFN_DOWN(handle); | 60 | unsigned long pfn = PFN_DOWN(handle); |
65 | /* | 61 | /* |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index b7eed75960fe..c6834c0cfd1c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -128,16 +128,16 @@ static void __dma_page_dev_to_cpu(struct page *, unsigned long, | |||
128 | */ | 128 | */ |
129 | static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, | 129 | static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, |
130 | unsigned long offset, size_t size, enum dma_data_direction dir, | 130 | unsigned long offset, size_t size, enum dma_data_direction dir, |
131 | struct dma_attrs *attrs) | 131 | unsigned long attrs) |
132 | { | 132 | { |
133 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 133 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
134 | __dma_page_cpu_to_dev(page, offset, size, dir); | 134 | __dma_page_cpu_to_dev(page, offset, size, dir); |
135 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | 135 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; |
136 | } | 136 | } |
137 | 137 | ||
138 | static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, | 138 | static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, |
139 | unsigned long offset, size_t size, enum dma_data_direction dir, | 139 | unsigned long offset, size_t size, enum dma_data_direction dir, |
140 | struct dma_attrs *attrs) | 140 | unsigned long attrs) |
141 | { | 141 | { |
142 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | 142 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; |
143 | } | 143 | } |
@@ -157,10 +157,9 @@ static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *pag | |||
157 | * whatever the device wrote there. | 157 | * whatever the device wrote there. |
158 | */ | 158 | */ |
159 | static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, | 159 | static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, |
160 | size_t size, enum dma_data_direction dir, | 160 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
161 | struct dma_attrs *attrs) | ||
162 | { | 161 | { |
163 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 162 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
164 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), | 163 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), |
165 | handle & ~PAGE_MASK, size, dir); | 164 | handle & ~PAGE_MASK, size, dir); |
166 | } | 165 | } |
@@ -198,12 +197,12 @@ struct dma_map_ops arm_dma_ops = { | |||
198 | EXPORT_SYMBOL(arm_dma_ops); | 197 | EXPORT_SYMBOL(arm_dma_ops); |
199 | 198 | ||
200 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size, | 199 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size, |
201 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); | 200 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs); |
202 | static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, | 201 | static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, |
203 | dma_addr_t handle, struct dma_attrs *attrs); | 202 | dma_addr_t handle, unsigned long attrs); |
204 | static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 203 | static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
205 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 204 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
206 | struct dma_attrs *attrs); | 205 | unsigned long attrs); |
207 | 206 | ||
208 | struct dma_map_ops arm_coherent_dma_ops = { | 207 | struct dma_map_ops arm_coherent_dma_ops = { |
209 | .alloc = arm_coherent_dma_alloc, | 208 | .alloc = arm_coherent_dma_alloc, |
@@ -639,11 +638,11 @@ static void __free_from_contiguous(struct device *dev, struct page *page, | |||
639 | dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); | 638 | dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); |
640 | } | 639 | } |
641 | 640 | ||
642 | static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) | 641 | static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot) |
643 | { | 642 | { |
644 | prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ? | 643 | prot = (attrs & DMA_ATTR_WRITE_COMBINE) ? |
645 | pgprot_writecombine(prot) : | 644 | pgprot_writecombine(prot) : |
646 | pgprot_dmacoherent(prot); | 645 | pgprot_dmacoherent(prot); |
647 | return prot; | 646 | return prot; |
648 | } | 647 | } |
649 | 648 | ||
@@ -751,7 +750,7 @@ static struct arm_dma_allocator remap_allocator = { | |||
751 | 750 | ||
752 | static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 751 | static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
753 | gfp_t gfp, pgprot_t prot, bool is_coherent, | 752 | gfp_t gfp, pgprot_t prot, bool is_coherent, |
754 | struct dma_attrs *attrs, const void *caller) | 753 | unsigned long attrs, const void *caller) |
755 | { | 754 | { |
756 | u64 mask = get_coherent_dma_mask(dev); | 755 | u64 mask = get_coherent_dma_mask(dev); |
757 | struct page *page = NULL; | 756 | struct page *page = NULL; |
@@ -764,7 +763,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
764 | .gfp = gfp, | 763 | .gfp = gfp, |
765 | .prot = prot, | 764 | .prot = prot, |
766 | .caller = caller, | 765 | .caller = caller, |
767 | .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs), | 766 | .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), |
768 | .coherent_flag = is_coherent ? COHERENT : NORMAL, | 767 | .coherent_flag = is_coherent ? COHERENT : NORMAL, |
769 | }; | 768 | }; |
770 | 769 | ||
@@ -834,7 +833,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
834 | * virtual and bus address for that space. | 833 | * virtual and bus address for that space. |
835 | */ | 834 | */ |
836 | void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 835 | void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
837 | gfp_t gfp, struct dma_attrs *attrs) | 836 | gfp_t gfp, unsigned long attrs) |
838 | { | 837 | { |
839 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); | 838 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); |
840 | 839 | ||
@@ -843,7 +842,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
843 | } | 842 | } |
844 | 843 | ||
845 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size, | 844 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size, |
846 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 845 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
847 | { | 846 | { |
848 | return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, | 847 | return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, |
849 | attrs, __builtin_return_address(0)); | 848 | attrs, __builtin_return_address(0)); |
@@ -851,7 +850,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size, | |||
851 | 850 | ||
852 | static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 851 | static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
853 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 852 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
854 | struct dma_attrs *attrs) | 853 | unsigned long attrs) |
855 | { | 854 | { |
856 | int ret = -ENXIO; | 855 | int ret = -ENXIO; |
857 | #ifdef CONFIG_MMU | 856 | #ifdef CONFIG_MMU |
@@ -879,14 +878,14 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
879 | */ | 878 | */ |
880 | static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 879 | static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
881 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 880 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
882 | struct dma_attrs *attrs) | 881 | unsigned long attrs) |
883 | { | 882 | { |
884 | return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); | 883 | return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); |
885 | } | 884 | } |
886 | 885 | ||
887 | int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 886 | int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
888 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 887 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
889 | struct dma_attrs *attrs) | 888 | unsigned long attrs) |
890 | { | 889 | { |
891 | #ifdef CONFIG_MMU | 890 | #ifdef CONFIG_MMU |
892 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); | 891 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); |
@@ -898,7 +897,7 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
898 | * Free a buffer as defined by the above mapping. | 897 | * Free a buffer as defined by the above mapping. |
899 | */ | 898 | */ |
900 | static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | 899 | static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, |
901 | dma_addr_t handle, struct dma_attrs *attrs, | 900 | dma_addr_t handle, unsigned long attrs, |
902 | bool is_coherent) | 901 | bool is_coherent) |
903 | { | 902 | { |
904 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); | 903 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); |
@@ -908,7 +907,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | |||
908 | .size = PAGE_ALIGN(size), | 907 | .size = PAGE_ALIGN(size), |
909 | .cpu_addr = cpu_addr, | 908 | .cpu_addr = cpu_addr, |
910 | .page = page, | 909 | .page = page, |
911 | .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs), | 910 | .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), |
912 | }; | 911 | }; |
913 | 912 | ||
914 | buf = arm_dma_buffer_find(cpu_addr); | 913 | buf = arm_dma_buffer_find(cpu_addr); |
@@ -920,20 +919,20 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | |||
920 | } | 919 | } |
921 | 920 | ||
922 | void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | 921 | void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, |
923 | dma_addr_t handle, struct dma_attrs *attrs) | 922 | dma_addr_t handle, unsigned long attrs) |
924 | { | 923 | { |
925 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); | 924 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); |
926 | } | 925 | } |
927 | 926 | ||
928 | static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, | 927 | static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, |
929 | dma_addr_t handle, struct dma_attrs *attrs) | 928 | dma_addr_t handle, unsigned long attrs) |
930 | { | 929 | { |
931 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); | 930 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); |
932 | } | 931 | } |
933 | 932 | ||
934 | int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, | 933 | int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, |
935 | void *cpu_addr, dma_addr_t handle, size_t size, | 934 | void *cpu_addr, dma_addr_t handle, size_t size, |
936 | struct dma_attrs *attrs) | 935 | unsigned long attrs) |
937 | { | 936 | { |
938 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); | 937 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); |
939 | int ret; | 938 | int ret; |
@@ -1066,7 +1065,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | |||
1066 | * here. | 1065 | * here. |
1067 | */ | 1066 | */ |
1068 | int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 1067 | int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
1069 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1068 | enum dma_data_direction dir, unsigned long attrs) |
1070 | { | 1069 | { |
1071 | struct dma_map_ops *ops = get_dma_ops(dev); | 1070 | struct dma_map_ops *ops = get_dma_ops(dev); |
1072 | struct scatterlist *s; | 1071 | struct scatterlist *s; |
@@ -1100,7 +1099,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
1100 | * rules concerning calls here are the same as for dma_unmap_single(). | 1099 | * rules concerning calls here are the same as for dma_unmap_single(). |
1101 | */ | 1100 | */ |
1102 | void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 1101 | void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
1103 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1102 | enum dma_data_direction dir, unsigned long attrs) |
1104 | { | 1103 | { |
1105 | struct dma_map_ops *ops = get_dma_ops(dev); | 1104 | struct dma_map_ops *ops = get_dma_ops(dev); |
1106 | struct scatterlist *s; | 1105 | struct scatterlist *s; |
@@ -1273,7 +1272,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping, | |||
1273 | static const int iommu_order_array[] = { 9, 8, 4, 0 }; | 1272 | static const int iommu_order_array[] = { 9, 8, 4, 0 }; |
1274 | 1273 | ||
1275 | static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, | 1274 | static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, |
1276 | gfp_t gfp, struct dma_attrs *attrs, | 1275 | gfp_t gfp, unsigned long attrs, |
1277 | int coherent_flag) | 1276 | int coherent_flag) |
1278 | { | 1277 | { |
1279 | struct page **pages; | 1278 | struct page **pages; |
@@ -1289,7 +1288,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, | |||
1289 | if (!pages) | 1288 | if (!pages) |
1290 | return NULL; | 1289 | return NULL; |
1291 | 1290 | ||
1292 | if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) | 1291 | if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) |
1293 | { | 1292 | { |
1294 | unsigned long order = get_order(size); | 1293 | unsigned long order = get_order(size); |
1295 | struct page *page; | 1294 | struct page *page; |
@@ -1307,7 +1306,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, | |||
1307 | } | 1306 | } |
1308 | 1307 | ||
1309 | /* Go straight to 4K chunks if caller says it's OK. */ | 1308 | /* Go straight to 4K chunks if caller says it's OK. */ |
1310 | if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) | 1309 | if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) |
1311 | order_idx = ARRAY_SIZE(iommu_order_array) - 1; | 1310 | order_idx = ARRAY_SIZE(iommu_order_array) - 1; |
1312 | 1311 | ||
1313 | /* | 1312 | /* |
@@ -1363,12 +1362,12 @@ error: | |||
1363 | } | 1362 | } |
1364 | 1363 | ||
1365 | static int __iommu_free_buffer(struct device *dev, struct page **pages, | 1364 | static int __iommu_free_buffer(struct device *dev, struct page **pages, |
1366 | size_t size, struct dma_attrs *attrs) | 1365 | size_t size, unsigned long attrs) |
1367 | { | 1366 | { |
1368 | int count = size >> PAGE_SHIFT; | 1367 | int count = size >> PAGE_SHIFT; |
1369 | int i; | 1368 | int i; |
1370 | 1369 | ||
1371 | if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { | 1370 | if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { |
1372 | dma_release_from_contiguous(dev, pages[0], count); | 1371 | dma_release_from_contiguous(dev, pages[0], count); |
1373 | } else { | 1372 | } else { |
1374 | for (i = 0; i < count; i++) | 1373 | for (i = 0; i < count; i++) |
@@ -1460,14 +1459,14 @@ static struct page **__atomic_get_pages(void *addr) | |||
1460 | return (struct page **)page; | 1459 | return (struct page **)page; |
1461 | } | 1460 | } |
1462 | 1461 | ||
1463 | static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) | 1462 | static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs) |
1464 | { | 1463 | { |
1465 | struct vm_struct *area; | 1464 | struct vm_struct *area; |
1466 | 1465 | ||
1467 | if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) | 1466 | if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) |
1468 | return __atomic_get_pages(cpu_addr); | 1467 | return __atomic_get_pages(cpu_addr); |
1469 | 1468 | ||
1470 | if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) | 1469 | if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) |
1471 | return cpu_addr; | 1470 | return cpu_addr; |
1472 | 1471 | ||
1473 | area = find_vm_area(cpu_addr); | 1472 | area = find_vm_area(cpu_addr); |
@@ -1511,7 +1510,7 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr, | |||
1511 | } | 1510 | } |
1512 | 1511 | ||
1513 | static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, | 1512 | static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, |
1514 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs, | 1513 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs, |
1515 | int coherent_flag) | 1514 | int coherent_flag) |
1516 | { | 1515 | { |
1517 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); | 1516 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); |
@@ -1542,7 +1541,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, | |||
1542 | if (*handle == DMA_ERROR_CODE) | 1541 | if (*handle == DMA_ERROR_CODE) |
1543 | goto err_buffer; | 1542 | goto err_buffer; |
1544 | 1543 | ||
1545 | if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) | 1544 | if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) |
1546 | return pages; | 1545 | return pages; |
1547 | 1546 | ||
1548 | addr = __iommu_alloc_remap(pages, size, gfp, prot, | 1547 | addr = __iommu_alloc_remap(pages, size, gfp, prot, |
@@ -1560,20 +1559,20 @@ err_buffer: | |||
1560 | } | 1559 | } |
1561 | 1560 | ||
1562 | static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, | 1561 | static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, |
1563 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 1562 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
1564 | { | 1563 | { |
1565 | return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); | 1564 | return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); |
1566 | } | 1565 | } |
1567 | 1566 | ||
1568 | static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, | 1567 | static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, |
1569 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 1568 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
1570 | { | 1569 | { |
1571 | return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); | 1570 | return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); |
1572 | } | 1571 | } |
1573 | 1572 | ||
1574 | static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, | 1573 | static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, |
1575 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 1574 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
1576 | struct dma_attrs *attrs) | 1575 | unsigned long attrs) |
1577 | { | 1576 | { |
1578 | unsigned long uaddr = vma->vm_start; | 1577 | unsigned long uaddr = vma->vm_start; |
1579 | unsigned long usize = vma->vm_end - vma->vm_start; | 1578 | unsigned long usize = vma->vm_end - vma->vm_start; |
@@ -1603,7 +1602,7 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma | |||
1603 | } | 1602 | } |
1604 | static int arm_iommu_mmap_attrs(struct device *dev, | 1603 | static int arm_iommu_mmap_attrs(struct device *dev, |
1605 | struct vm_area_struct *vma, void *cpu_addr, | 1604 | struct vm_area_struct *vma, void *cpu_addr, |
1606 | dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) | 1605 | dma_addr_t dma_addr, size_t size, unsigned long attrs) |
1607 | { | 1606 | { |
1608 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); | 1607 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); |
1609 | 1608 | ||
@@ -1612,7 +1611,7 @@ static int arm_iommu_mmap_attrs(struct device *dev, | |||
1612 | 1611 | ||
1613 | static int arm_coherent_iommu_mmap_attrs(struct device *dev, | 1612 | static int arm_coherent_iommu_mmap_attrs(struct device *dev, |
1614 | struct vm_area_struct *vma, void *cpu_addr, | 1613 | struct vm_area_struct *vma, void *cpu_addr, |
1615 | dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) | 1614 | dma_addr_t dma_addr, size_t size, unsigned long attrs) |
1616 | { | 1615 | { |
1617 | return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); | 1616 | return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); |
1618 | } | 1617 | } |
@@ -1622,7 +1621,7 @@ static int arm_coherent_iommu_mmap_attrs(struct device *dev, | |||
1622 | * Must not be called with IRQs disabled. | 1621 | * Must not be called with IRQs disabled. |
1623 | */ | 1622 | */ |
1624 | void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | 1623 | void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, |
1625 | dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag) | 1624 | dma_addr_t handle, unsigned long attrs, int coherent_flag) |
1626 | { | 1625 | { |
1627 | struct page **pages; | 1626 | struct page **pages; |
1628 | size = PAGE_ALIGN(size); | 1627 | size = PAGE_ALIGN(size); |
@@ -1638,7 +1637,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | |||
1638 | return; | 1637 | return; |
1639 | } | 1638 | } |
1640 | 1639 | ||
1641 | if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { | 1640 | if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) { |
1642 | dma_common_free_remap(cpu_addr, size, | 1641 | dma_common_free_remap(cpu_addr, size, |
1643 | VM_ARM_DMA_CONSISTENT | VM_USERMAP); | 1642 | VM_ARM_DMA_CONSISTENT | VM_USERMAP); |
1644 | } | 1643 | } |
@@ -1648,20 +1647,20 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | |||
1648 | } | 1647 | } |
1649 | 1648 | ||
1650 | void arm_iommu_free_attrs(struct device *dev, size_t size, | 1649 | void arm_iommu_free_attrs(struct device *dev, size_t size, |
1651 | void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) | 1650 | void *cpu_addr, dma_addr_t handle, unsigned long attrs) |
1652 | { | 1651 | { |
1653 | __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); | 1652 | __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); |
1654 | } | 1653 | } |
1655 | 1654 | ||
1656 | void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, | 1655 | void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, |
1657 | void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) | 1656 | void *cpu_addr, dma_addr_t handle, unsigned long attrs) |
1658 | { | 1657 | { |
1659 | __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); | 1658 | __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); |
1660 | } | 1659 | } |
1661 | 1660 | ||
1662 | static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, | 1661 | static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, |
1663 | void *cpu_addr, dma_addr_t dma_addr, | 1662 | void *cpu_addr, dma_addr_t dma_addr, |
1664 | size_t size, struct dma_attrs *attrs) | 1663 | size_t size, unsigned long attrs) |
1665 | { | 1664 | { |
1666 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 1665 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
1667 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); | 1666 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); |
@@ -1699,7 +1698,7 @@ static int __dma_direction_to_prot(enum dma_data_direction dir) | |||
1699 | */ | 1698 | */ |
1700 | static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | 1699 | static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, |
1701 | size_t size, dma_addr_t *handle, | 1700 | size_t size, dma_addr_t *handle, |
1702 | enum dma_data_direction dir, struct dma_attrs *attrs, | 1701 | enum dma_data_direction dir, unsigned long attrs, |
1703 | bool is_coherent) | 1702 | bool is_coherent) |
1704 | { | 1703 | { |
1705 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); | 1704 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); |
@@ -1720,8 +1719,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | |||
1720 | phys_addr_t phys = page_to_phys(sg_page(s)); | 1719 | phys_addr_t phys = page_to_phys(sg_page(s)); |
1721 | unsigned int len = PAGE_ALIGN(s->offset + s->length); | 1720 | unsigned int len = PAGE_ALIGN(s->offset + s->length); |
1722 | 1721 | ||
1723 | if (!is_coherent && | 1722 | if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
1724 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
1725 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); | 1723 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); |
1726 | 1724 | ||
1727 | prot = __dma_direction_to_prot(dir); | 1725 | prot = __dma_direction_to_prot(dir); |
@@ -1742,7 +1740,7 @@ fail: | |||
1742 | } | 1740 | } |
1743 | 1741 | ||
1744 | static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 1742 | static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
1745 | enum dma_data_direction dir, struct dma_attrs *attrs, | 1743 | enum dma_data_direction dir, unsigned long attrs, |
1746 | bool is_coherent) | 1744 | bool is_coherent) |
1747 | { | 1745 | { |
1748 | struct scatterlist *s = sg, *dma = sg, *start = sg; | 1746 | struct scatterlist *s = sg, *dma = sg, *start = sg; |
@@ -1800,7 +1798,7 @@ bad_mapping: | |||
1800 | * obtained via sg_dma_{address,length}. | 1798 | * obtained via sg_dma_{address,length}. |
1801 | */ | 1799 | */ |
1802 | int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, | 1800 | int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, |
1803 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 1801 | int nents, enum dma_data_direction dir, unsigned long attrs) |
1804 | { | 1802 | { |
1805 | return __iommu_map_sg(dev, sg, nents, dir, attrs, true); | 1803 | return __iommu_map_sg(dev, sg, nents, dir, attrs, true); |
1806 | } | 1804 | } |
@@ -1818,14 +1816,14 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, | |||
1818 | * sg_dma_{address,length}. | 1816 | * sg_dma_{address,length}. |
1819 | */ | 1817 | */ |
1820 | int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, | 1818 | int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, |
1821 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 1819 | int nents, enum dma_data_direction dir, unsigned long attrs) |
1822 | { | 1820 | { |
1823 | return __iommu_map_sg(dev, sg, nents, dir, attrs, false); | 1821 | return __iommu_map_sg(dev, sg, nents, dir, attrs, false); |
1824 | } | 1822 | } |
1825 | 1823 | ||
1826 | static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | 1824 | static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, |
1827 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs, | 1825 | int nents, enum dma_data_direction dir, |
1828 | bool is_coherent) | 1826 | unsigned long attrs, bool is_coherent) |
1829 | { | 1827 | { |
1830 | struct scatterlist *s; | 1828 | struct scatterlist *s; |
1831 | int i; | 1829 | int i; |
@@ -1834,8 +1832,7 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
1834 | if (sg_dma_len(s)) | 1832 | if (sg_dma_len(s)) |
1835 | __iommu_remove_mapping(dev, sg_dma_address(s), | 1833 | __iommu_remove_mapping(dev, sg_dma_address(s), |
1836 | sg_dma_len(s)); | 1834 | sg_dma_len(s)); |
1837 | if (!is_coherent && | 1835 | if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
1838 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
1839 | __dma_page_dev_to_cpu(sg_page(s), s->offset, | 1836 | __dma_page_dev_to_cpu(sg_page(s), s->offset, |
1840 | s->length, dir); | 1837 | s->length, dir); |
1841 | } | 1838 | } |
@@ -1852,7 +1849,8 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
1852 | * rules concerning calls here are the same as for dma_unmap_single(). | 1849 | * rules concerning calls here are the same as for dma_unmap_single(). |
1853 | */ | 1850 | */ |
1854 | void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | 1851 | void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, |
1855 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 1852 | int nents, enum dma_data_direction dir, |
1853 | unsigned long attrs) | ||
1856 | { | 1854 | { |
1857 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); | 1855 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); |
1858 | } | 1856 | } |
@@ -1868,7 +1866,8 @@ void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
1868 | * rules concerning calls here are the same as for dma_unmap_single(). | 1866 | * rules concerning calls here are the same as for dma_unmap_single(). |
1869 | */ | 1867 | */ |
1870 | void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 1868 | void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
1871 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1869 | enum dma_data_direction dir, |
1870 | unsigned long attrs) | ||
1872 | { | 1871 | { |
1873 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); | 1872 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); |
1874 | } | 1873 | } |
@@ -1921,7 +1920,7 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
1921 | */ | 1920 | */ |
1922 | static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, | 1921 | static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, |
1923 | unsigned long offset, size_t size, enum dma_data_direction dir, | 1922 | unsigned long offset, size_t size, enum dma_data_direction dir, |
1924 | struct dma_attrs *attrs) | 1923 | unsigned long attrs) |
1925 | { | 1924 | { |
1926 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); | 1925 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); |
1927 | dma_addr_t dma_addr; | 1926 | dma_addr_t dma_addr; |
@@ -1955,9 +1954,9 @@ fail: | |||
1955 | */ | 1954 | */ |
1956 | static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, | 1955 | static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, |
1957 | unsigned long offset, size_t size, enum dma_data_direction dir, | 1956 | unsigned long offset, size_t size, enum dma_data_direction dir, |
1958 | struct dma_attrs *attrs) | 1957 | unsigned long attrs) |
1959 | { | 1958 | { |
1960 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 1959 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
1961 | __dma_page_cpu_to_dev(page, offset, size, dir); | 1960 | __dma_page_cpu_to_dev(page, offset, size, dir); |
1962 | 1961 | ||
1963 | return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); | 1962 | return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); |
@@ -1973,8 +1972,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, | |||
1973 | * Coherent IOMMU aware version of arm_dma_unmap_page() | 1972 | * Coherent IOMMU aware version of arm_dma_unmap_page() |
1974 | */ | 1973 | */ |
1975 | static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, | 1974 | static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, |
1976 | size_t size, enum dma_data_direction dir, | 1975 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
1977 | struct dma_attrs *attrs) | ||
1978 | { | 1976 | { |
1979 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); | 1977 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); |
1980 | dma_addr_t iova = handle & PAGE_MASK; | 1978 | dma_addr_t iova = handle & PAGE_MASK; |
@@ -1998,8 +1996,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, | |||
1998 | * IOMMU aware version of arm_dma_unmap_page() | 1996 | * IOMMU aware version of arm_dma_unmap_page() |
1999 | */ | 1997 | */ |
2000 | static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, | 1998 | static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, |
2001 | size_t size, enum dma_data_direction dir, | 1999 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
2002 | struct dma_attrs *attrs) | ||
2003 | { | 2000 | { |
2004 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); | 2001 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); |
2005 | dma_addr_t iova = handle & PAGE_MASK; | 2002 | dma_addr_t iova = handle & PAGE_MASK; |
@@ -2010,7 +2007,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, | |||
2010 | if (!iova) | 2007 | if (!iova) |
2011 | return; | 2008 | return; |
2012 | 2009 | ||
2013 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 2010 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
2014 | __dma_page_dev_to_cpu(page, offset, size, dir); | 2011 | __dma_page_dev_to_cpu(page, offset, size, dir); |
2015 | 2012 | ||
2016 | iommu_unmap(mapping->domain, iova, len); | 2013 | iommu_unmap(mapping->domain, iova, len); |
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index c5f9a9e3d1f3..d062f08f5020 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c | |||
@@ -98,11 +98,11 @@ static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle, | |||
98 | 98 | ||
99 | void __xen_dma_map_page(struct device *hwdev, struct page *page, | 99 | void __xen_dma_map_page(struct device *hwdev, struct page *page, |
100 | dma_addr_t dev_addr, unsigned long offset, size_t size, | 100 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
101 | enum dma_data_direction dir, struct dma_attrs *attrs) | 101 | enum dma_data_direction dir, unsigned long attrs) |
102 | { | 102 | { |
103 | if (is_device_dma_coherent(hwdev)) | 103 | if (is_device_dma_coherent(hwdev)) |
104 | return; | 104 | return; |
105 | if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 105 | if (attrs & DMA_ATTR_SKIP_CPU_SYNC) |
106 | return; | 106 | return; |
107 | 107 | ||
108 | __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir); | 108 | __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir); |
@@ -110,12 +110,12 @@ void __xen_dma_map_page(struct device *hwdev, struct page *page, | |||
110 | 110 | ||
111 | void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 111 | void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
112 | size_t size, enum dma_data_direction dir, | 112 | size_t size, enum dma_data_direction dir, |
113 | struct dma_attrs *attrs) | 113 | unsigned long attrs) |
114 | 114 | ||
115 | { | 115 | { |
116 | if (is_device_dma_coherent(hwdev)) | 116 | if (is_device_dma_coherent(hwdev)) |
117 | return; | 117 | return; |
118 | if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 118 | if (attrs & DMA_ATTR_SKIP_CPU_SYNC) |
119 | return; | 119 | return; |
120 | 120 | ||
121 | __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); | 121 | __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index f6c55afab3e2..c4284c432ae8 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -32,10 +32,10 @@ | |||
32 | 32 | ||
33 | static int swiotlb __read_mostly; | 33 | static int swiotlb __read_mostly; |
34 | 34 | ||
35 | static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, | 35 | static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot, |
36 | bool coherent) | 36 | bool coherent) |
37 | { | 37 | { |
38 | if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) | 38 | if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE)) |
39 | return pgprot_writecombine(prot); | 39 | return pgprot_writecombine(prot); |
40 | return prot; | 40 | return prot; |
41 | } | 41 | } |
@@ -91,7 +91,7 @@ static int __free_from_pool(void *start, size_t size) | |||
91 | 91 | ||
92 | static void *__dma_alloc_coherent(struct device *dev, size_t size, | 92 | static void *__dma_alloc_coherent(struct device *dev, size_t size, |
93 | dma_addr_t *dma_handle, gfp_t flags, | 93 | dma_addr_t *dma_handle, gfp_t flags, |
94 | struct dma_attrs *attrs) | 94 | unsigned long attrs) |
95 | { | 95 | { |
96 | if (dev == NULL) { | 96 | if (dev == NULL) { |
97 | WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); | 97 | WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); |
@@ -121,7 +121,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, | |||
121 | 121 | ||
122 | static void __dma_free_coherent(struct device *dev, size_t size, | 122 | static void __dma_free_coherent(struct device *dev, size_t size, |
123 | void *vaddr, dma_addr_t dma_handle, | 123 | void *vaddr, dma_addr_t dma_handle, |
124 | struct dma_attrs *attrs) | 124 | unsigned long attrs) |
125 | { | 125 | { |
126 | bool freed; | 126 | bool freed; |
127 | phys_addr_t paddr = dma_to_phys(dev, dma_handle); | 127 | phys_addr_t paddr = dma_to_phys(dev, dma_handle); |
@@ -140,7 +140,7 @@ static void __dma_free_coherent(struct device *dev, size_t size, | |||
140 | 140 | ||
141 | static void *__dma_alloc(struct device *dev, size_t size, | 141 | static void *__dma_alloc(struct device *dev, size_t size, |
142 | dma_addr_t *dma_handle, gfp_t flags, | 142 | dma_addr_t *dma_handle, gfp_t flags, |
143 | struct dma_attrs *attrs) | 143 | unsigned long attrs) |
144 | { | 144 | { |
145 | struct page *page; | 145 | struct page *page; |
146 | void *ptr, *coherent_ptr; | 146 | void *ptr, *coherent_ptr; |
@@ -188,7 +188,7 @@ no_mem: | |||
188 | 188 | ||
189 | static void __dma_free(struct device *dev, size_t size, | 189 | static void __dma_free(struct device *dev, size_t size, |
190 | void *vaddr, dma_addr_t dma_handle, | 190 | void *vaddr, dma_addr_t dma_handle, |
191 | struct dma_attrs *attrs) | 191 | unsigned long attrs) |
192 | { | 192 | { |
193 | void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); | 193 | void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); |
194 | 194 | ||
@@ -205,7 +205,7 @@ static void __dma_free(struct device *dev, size_t size, | |||
205 | static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, | 205 | static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, |
206 | unsigned long offset, size_t size, | 206 | unsigned long offset, size_t size, |
207 | enum dma_data_direction dir, | 207 | enum dma_data_direction dir, |
208 | struct dma_attrs *attrs) | 208 | unsigned long attrs) |
209 | { | 209 | { |
210 | dma_addr_t dev_addr; | 210 | dma_addr_t dev_addr; |
211 | 211 | ||
@@ -219,7 +219,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, | |||
219 | 219 | ||
220 | static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, | 220 | static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, |
221 | size_t size, enum dma_data_direction dir, | 221 | size_t size, enum dma_data_direction dir, |
222 | struct dma_attrs *attrs) | 222 | unsigned long attrs) |
223 | { | 223 | { |
224 | if (!is_device_dma_coherent(dev)) | 224 | if (!is_device_dma_coherent(dev)) |
225 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); | 225 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); |
@@ -228,7 +228,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
228 | 228 | ||
229 | static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | 229 | static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, |
230 | int nelems, enum dma_data_direction dir, | 230 | int nelems, enum dma_data_direction dir, |
231 | struct dma_attrs *attrs) | 231 | unsigned long attrs) |
232 | { | 232 | { |
233 | struct scatterlist *sg; | 233 | struct scatterlist *sg; |
234 | int i, ret; | 234 | int i, ret; |
@@ -245,7 +245,7 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
245 | static void __swiotlb_unmap_sg_attrs(struct device *dev, | 245 | static void __swiotlb_unmap_sg_attrs(struct device *dev, |
246 | struct scatterlist *sgl, int nelems, | 246 | struct scatterlist *sgl, int nelems, |
247 | enum dma_data_direction dir, | 247 | enum dma_data_direction dir, |
248 | struct dma_attrs *attrs) | 248 | unsigned long attrs) |
249 | { | 249 | { |
250 | struct scatterlist *sg; | 250 | struct scatterlist *sg; |
251 | int i; | 251 | int i; |
@@ -306,7 +306,7 @@ static void __swiotlb_sync_sg_for_device(struct device *dev, | |||
306 | static int __swiotlb_mmap(struct device *dev, | 306 | static int __swiotlb_mmap(struct device *dev, |
307 | struct vm_area_struct *vma, | 307 | struct vm_area_struct *vma, |
308 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 308 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
309 | struct dma_attrs *attrs) | 309 | unsigned long attrs) |
310 | { | 310 | { |
311 | int ret = -ENXIO; | 311 | int ret = -ENXIO; |
312 | unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> | 312 | unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> |
@@ -333,7 +333,7 @@ static int __swiotlb_mmap(struct device *dev, | |||
333 | 333 | ||
334 | static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, | 334 | static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, |
335 | void *cpu_addr, dma_addr_t handle, size_t size, | 335 | void *cpu_addr, dma_addr_t handle, size_t size, |
336 | struct dma_attrs *attrs) | 336 | unsigned long attrs) |
337 | { | 337 | { |
338 | int ret = sg_alloc_table(sgt, 1, GFP_KERNEL); | 338 | int ret = sg_alloc_table(sgt, 1, GFP_KERNEL); |
339 | 339 | ||
@@ -435,21 +435,21 @@ out: | |||
435 | 435 | ||
436 | static void *__dummy_alloc(struct device *dev, size_t size, | 436 | static void *__dummy_alloc(struct device *dev, size_t size, |
437 | dma_addr_t *dma_handle, gfp_t flags, | 437 | dma_addr_t *dma_handle, gfp_t flags, |
438 | struct dma_attrs *attrs) | 438 | unsigned long attrs) |
439 | { | 439 | { |
440 | return NULL; | 440 | return NULL; |
441 | } | 441 | } |
442 | 442 | ||
443 | static void __dummy_free(struct device *dev, size_t size, | 443 | static void __dummy_free(struct device *dev, size_t size, |
444 | void *vaddr, dma_addr_t dma_handle, | 444 | void *vaddr, dma_addr_t dma_handle, |
445 | struct dma_attrs *attrs) | 445 | unsigned long attrs) |
446 | { | 446 | { |
447 | } | 447 | } |
448 | 448 | ||
449 | static int __dummy_mmap(struct device *dev, | 449 | static int __dummy_mmap(struct device *dev, |
450 | struct vm_area_struct *vma, | 450 | struct vm_area_struct *vma, |
451 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 451 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
452 | struct dma_attrs *attrs) | 452 | unsigned long attrs) |
453 | { | 453 | { |
454 | return -ENXIO; | 454 | return -ENXIO; |
455 | } | 455 | } |
@@ -457,20 +457,20 @@ static int __dummy_mmap(struct device *dev, | |||
457 | static dma_addr_t __dummy_map_page(struct device *dev, struct page *page, | 457 | static dma_addr_t __dummy_map_page(struct device *dev, struct page *page, |
458 | unsigned long offset, size_t size, | 458 | unsigned long offset, size_t size, |
459 | enum dma_data_direction dir, | 459 | enum dma_data_direction dir, |
460 | struct dma_attrs *attrs) | 460 | unsigned long attrs) |
461 | { | 461 | { |
462 | return DMA_ERROR_CODE; | 462 | return DMA_ERROR_CODE; |
463 | } | 463 | } |
464 | 464 | ||
465 | static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr, | 465 | static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr, |
466 | size_t size, enum dma_data_direction dir, | 466 | size_t size, enum dma_data_direction dir, |
467 | struct dma_attrs *attrs) | 467 | unsigned long attrs) |
468 | { | 468 | { |
469 | } | 469 | } |
470 | 470 | ||
471 | static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, | 471 | static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, |
472 | int nelems, enum dma_data_direction dir, | 472 | int nelems, enum dma_data_direction dir, |
473 | struct dma_attrs *attrs) | 473 | unsigned long attrs) |
474 | { | 474 | { |
475 | return 0; | 475 | return 0; |
476 | } | 476 | } |
@@ -478,7 +478,7 @@ static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, | |||
478 | static void __dummy_unmap_sg(struct device *dev, | 478 | static void __dummy_unmap_sg(struct device *dev, |
479 | struct scatterlist *sgl, int nelems, | 479 | struct scatterlist *sgl, int nelems, |
480 | enum dma_data_direction dir, | 480 | enum dma_data_direction dir, |
481 | struct dma_attrs *attrs) | 481 | unsigned long attrs) |
482 | { | 482 | { |
483 | } | 483 | } |
484 | 484 | ||
@@ -553,7 +553,7 @@ static void flush_page(struct device *dev, const void *virt, phys_addr_t phys) | |||
553 | 553 | ||
554 | static void *__iommu_alloc_attrs(struct device *dev, size_t size, | 554 | static void *__iommu_alloc_attrs(struct device *dev, size_t size, |
555 | dma_addr_t *handle, gfp_t gfp, | 555 | dma_addr_t *handle, gfp_t gfp, |
556 | struct dma_attrs *attrs) | 556 | unsigned long attrs) |
557 | { | 557 | { |
558 | bool coherent = is_device_dma_coherent(dev); | 558 | bool coherent = is_device_dma_coherent(dev); |
559 | int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); | 559 | int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); |
@@ -613,7 +613,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, | |||
613 | } | 613 | } |
614 | 614 | ||
615 | static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | 615 | static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, |
616 | dma_addr_t handle, struct dma_attrs *attrs) | 616 | dma_addr_t handle, unsigned long attrs) |
617 | { | 617 | { |
618 | size_t iosize = size; | 618 | size_t iosize = size; |
619 | 619 | ||
@@ -629,7 +629,7 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | |||
629 | * Hence how dodgy the below logic looks... | 629 | * Hence how dodgy the below logic looks... |
630 | */ | 630 | */ |
631 | if (__in_atomic_pool(cpu_addr, size)) { | 631 | if (__in_atomic_pool(cpu_addr, size)) { |
632 | iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); | 632 | iommu_dma_unmap_page(dev, handle, iosize, 0, 0); |
633 | __free_from_pool(cpu_addr, size); | 633 | __free_from_pool(cpu_addr, size); |
634 | } else if (is_vmalloc_addr(cpu_addr)){ | 634 | } else if (is_vmalloc_addr(cpu_addr)){ |
635 | struct vm_struct *area = find_vm_area(cpu_addr); | 635 | struct vm_struct *area = find_vm_area(cpu_addr); |
@@ -639,14 +639,14 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | |||
639 | iommu_dma_free(dev, area->pages, iosize, &handle); | 639 | iommu_dma_free(dev, area->pages, iosize, &handle); |
640 | dma_common_free_remap(cpu_addr, size, VM_USERMAP); | 640 | dma_common_free_remap(cpu_addr, size, VM_USERMAP); |
641 | } else { | 641 | } else { |
642 | iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); | 642 | iommu_dma_unmap_page(dev, handle, iosize, 0, 0); |
643 | __free_pages(virt_to_page(cpu_addr), get_order(size)); | 643 | __free_pages(virt_to_page(cpu_addr), get_order(size)); |
644 | } | 644 | } |
645 | } | 645 | } |
646 | 646 | ||
647 | static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, | 647 | static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, |
648 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 648 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
649 | struct dma_attrs *attrs) | 649 | unsigned long attrs) |
650 | { | 650 | { |
651 | struct vm_struct *area; | 651 | struct vm_struct *area; |
652 | int ret; | 652 | int ret; |
@@ -666,7 +666,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, | |||
666 | 666 | ||
667 | static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, | 667 | static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, |
668 | void *cpu_addr, dma_addr_t dma_addr, | 668 | void *cpu_addr, dma_addr_t dma_addr, |
669 | size_t size, struct dma_attrs *attrs) | 669 | size_t size, unsigned long attrs) |
670 | { | 670 | { |
671 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 671 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
672 | struct vm_struct *area = find_vm_area(cpu_addr); | 672 | struct vm_struct *area = find_vm_area(cpu_addr); |
@@ -707,14 +707,14 @@ static void __iommu_sync_single_for_device(struct device *dev, | |||
707 | static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, | 707 | static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, |
708 | unsigned long offset, size_t size, | 708 | unsigned long offset, size_t size, |
709 | enum dma_data_direction dir, | 709 | enum dma_data_direction dir, |
710 | struct dma_attrs *attrs) | 710 | unsigned long attrs) |
711 | { | 711 | { |
712 | bool coherent = is_device_dma_coherent(dev); | 712 | bool coherent = is_device_dma_coherent(dev); |
713 | int prot = dma_direction_to_prot(dir, coherent); | 713 | int prot = dma_direction_to_prot(dir, coherent); |
714 | dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); | 714 | dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); |
715 | 715 | ||
716 | if (!iommu_dma_mapping_error(dev, dev_addr) && | 716 | if (!iommu_dma_mapping_error(dev, dev_addr) && |
717 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 717 | (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
718 | __iommu_sync_single_for_device(dev, dev_addr, size, dir); | 718 | __iommu_sync_single_for_device(dev, dev_addr, size, dir); |
719 | 719 | ||
720 | return dev_addr; | 720 | return dev_addr; |
@@ -722,9 +722,9 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, | |||
722 | 722 | ||
723 | static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr, | 723 | static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr, |
724 | size_t size, enum dma_data_direction dir, | 724 | size_t size, enum dma_data_direction dir, |
725 | struct dma_attrs *attrs) | 725 | unsigned long attrs) |
726 | { | 726 | { |
727 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 727 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
728 | __iommu_sync_single_for_cpu(dev, dev_addr, size, dir); | 728 | __iommu_sync_single_for_cpu(dev, dev_addr, size, dir); |
729 | 729 | ||
730 | iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs); | 730 | iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs); |
@@ -760,11 +760,11 @@ static void __iommu_sync_sg_for_device(struct device *dev, | |||
760 | 760 | ||
761 | static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | 761 | static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, |
762 | int nelems, enum dma_data_direction dir, | 762 | int nelems, enum dma_data_direction dir, |
763 | struct dma_attrs *attrs) | 763 | unsigned long attrs) |
764 | { | 764 | { |
765 | bool coherent = is_device_dma_coherent(dev); | 765 | bool coherent = is_device_dma_coherent(dev); |
766 | 766 | ||
767 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 767 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
768 | __iommu_sync_sg_for_device(dev, sgl, nelems, dir); | 768 | __iommu_sync_sg_for_device(dev, sgl, nelems, dir); |
769 | 769 | ||
770 | return iommu_dma_map_sg(dev, sgl, nelems, | 770 | return iommu_dma_map_sg(dev, sgl, nelems, |
@@ -774,9 +774,9 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
774 | static void __iommu_unmap_sg_attrs(struct device *dev, | 774 | static void __iommu_unmap_sg_attrs(struct device *dev, |
775 | struct scatterlist *sgl, int nelems, | 775 | struct scatterlist *sgl, int nelems, |
776 | enum dma_data_direction dir, | 776 | enum dma_data_direction dir, |
777 | struct dma_attrs *attrs) | 777 | unsigned long attrs) |
778 | { | 778 | { |
779 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 779 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
780 | __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir); | 780 | __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir); |
781 | 781 | ||
782 | iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs); | 782 | iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs); |
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c index 92cf1fb2b3e6..58610d0df7ed 100644 --- a/arch/avr32/mm/dma-coherent.c +++ b/arch/avr32/mm/dma-coherent.c | |||
@@ -99,7 +99,7 @@ static void __dma_free(struct device *dev, size_t size, | |||
99 | } | 99 | } |
100 | 100 | ||
101 | static void *avr32_dma_alloc(struct device *dev, size_t size, | 101 | static void *avr32_dma_alloc(struct device *dev, size_t size, |
102 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 102 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
103 | { | 103 | { |
104 | struct page *page; | 104 | struct page *page; |
105 | dma_addr_t phys; | 105 | dma_addr_t phys; |
@@ -109,7 +109,7 @@ static void *avr32_dma_alloc(struct device *dev, size_t size, | |||
109 | return NULL; | 109 | return NULL; |
110 | phys = page_to_phys(page); | 110 | phys = page_to_phys(page); |
111 | 111 | ||
112 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) { | 112 | if (attrs & DMA_ATTR_WRITE_COMBINE) { |
113 | /* Now, map the page into P3 with write-combining turned on */ | 113 | /* Now, map the page into P3 with write-combining turned on */ |
114 | *handle = phys; | 114 | *handle = phys; |
115 | return __ioremap(phys, size, _PAGE_BUFFER); | 115 | return __ioremap(phys, size, _PAGE_BUFFER); |
@@ -119,11 +119,11 @@ static void *avr32_dma_alloc(struct device *dev, size_t size, | |||
119 | } | 119 | } |
120 | 120 | ||
121 | static void avr32_dma_free(struct device *dev, size_t size, | 121 | static void avr32_dma_free(struct device *dev, size_t size, |
122 | void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) | 122 | void *cpu_addr, dma_addr_t handle, unsigned long attrs) |
123 | { | 123 | { |
124 | struct page *page; | 124 | struct page *page; |
125 | 125 | ||
126 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) { | 126 | if (attrs & DMA_ATTR_WRITE_COMBINE) { |
127 | iounmap(cpu_addr); | 127 | iounmap(cpu_addr); |
128 | 128 | ||
129 | page = phys_to_page(handle); | 129 | page = phys_to_page(handle); |
@@ -142,7 +142,7 @@ static void avr32_dma_free(struct device *dev, size_t size, | |||
142 | 142 | ||
143 | static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, | 143 | static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, |
144 | unsigned long offset, size_t size, | 144 | unsigned long offset, size_t size, |
145 | enum dma_data_direction direction, struct dma_attrs *attrs) | 145 | enum dma_data_direction direction, unsigned long attrs) |
146 | { | 146 | { |
147 | void *cpu_addr = page_address(page) + offset; | 147 | void *cpu_addr = page_address(page) + offset; |
148 | 148 | ||
@@ -152,7 +152,7 @@ static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, | |||
152 | 152 | ||
153 | static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 153 | static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
154 | int nents, enum dma_data_direction direction, | 154 | int nents, enum dma_data_direction direction, |
155 | struct dma_attrs *attrs) | 155 | unsigned long attrs) |
156 | { | 156 | { |
157 | int i; | 157 | int i; |
158 | struct scatterlist *sg; | 158 | struct scatterlist *sg; |
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index 771afe6e4264..53fbbb61aa86 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c | |||
@@ -79,7 +79,7 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages) | |||
79 | } | 79 | } |
80 | 80 | ||
81 | static void *bfin_dma_alloc(struct device *dev, size_t size, | 81 | static void *bfin_dma_alloc(struct device *dev, size_t size, |
82 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 82 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
83 | { | 83 | { |
84 | void *ret; | 84 | void *ret; |
85 | 85 | ||
@@ -94,7 +94,7 @@ static void *bfin_dma_alloc(struct device *dev, size_t size, | |||
94 | } | 94 | } |
95 | 95 | ||
96 | static void bfin_dma_free(struct device *dev, size_t size, void *vaddr, | 96 | static void bfin_dma_free(struct device *dev, size_t size, void *vaddr, |
97 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 97 | dma_addr_t dma_handle, unsigned long attrs) |
98 | { | 98 | { |
99 | __free_dma_pages((unsigned long)vaddr, get_pages(size)); | 99 | __free_dma_pages((unsigned long)vaddr, get_pages(size)); |
100 | } | 100 | } |
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(__dma_sync); | |||
111 | 111 | ||
112 | static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list, | 112 | static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list, |
113 | int nents, enum dma_data_direction direction, | 113 | int nents, enum dma_data_direction direction, |
114 | struct dma_attrs *attrs) | 114 | unsigned long attrs) |
115 | { | 115 | { |
116 | struct scatterlist *sg; | 116 | struct scatterlist *sg; |
117 | int i; | 117 | int i; |
@@ -139,7 +139,7 @@ static void bfin_dma_sync_sg_for_device(struct device *dev, | |||
139 | 139 | ||
140 | static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page, | 140 | static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page, |
141 | unsigned long offset, size_t size, enum dma_data_direction dir, | 141 | unsigned long offset, size_t size, enum dma_data_direction dir, |
142 | struct dma_attrs *attrs) | 142 | unsigned long attrs) |
143 | { | 143 | { |
144 | dma_addr_t handle = (dma_addr_t)(page_address(page) + offset); | 144 | dma_addr_t handle = (dma_addr_t)(page_address(page) + offset); |
145 | 145 | ||
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index 6b5cd7b0cf32..5717b1e52d96 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h | |||
@@ -26,8 +26,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) | |||
26 | 26 | ||
27 | extern void coherent_mem_init(u32 start, u32 size); | 27 | extern void coherent_mem_init(u32 start, u32 size); |
28 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 28 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
29 | gfp_t gfp, struct dma_attrs *attrs); | 29 | gfp_t gfp, unsigned long attrs); |
30 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, | 30 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, |
31 | dma_addr_t dma_handle, struct dma_attrs *attrs); | 31 | dma_addr_t dma_handle, unsigned long attrs); |
32 | 32 | ||
33 | #endif /* _ASM_C6X_DMA_MAPPING_H */ | 33 | #endif /* _ASM_C6X_DMA_MAPPING_H */ |
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c index 8a80f3a250c0..db4a6a301f5e 100644 --- a/arch/c6x/kernel/dma.c +++ b/arch/c6x/kernel/dma.c | |||
@@ -38,7 +38,7 @@ static void c6x_dma_sync(dma_addr_t handle, size_t size, | |||
38 | 38 | ||
39 | static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, | 39 | static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, |
40 | unsigned long offset, size_t size, enum dma_data_direction dir, | 40 | unsigned long offset, size_t size, enum dma_data_direction dir, |
41 | struct dma_attrs *attrs) | 41 | unsigned long attrs) |
42 | { | 42 | { |
43 | dma_addr_t handle = virt_to_phys(page_address(page) + offset); | 43 | dma_addr_t handle = virt_to_phys(page_address(page) + offset); |
44 | 44 | ||
@@ -47,13 +47,13 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, | |||
47 | } | 47 | } |
48 | 48 | ||
49 | static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle, | 49 | static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle, |
50 | size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) | 50 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
51 | { | 51 | { |
52 | c6x_dma_sync(handle, size, dir); | 52 | c6x_dma_sync(handle, size, dir); |
53 | } | 53 | } |
54 | 54 | ||
55 | static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 55 | static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
56 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 56 | int nents, enum dma_data_direction dir, unsigned long attrs) |
57 | { | 57 | { |
58 | struct scatterlist *sg; | 58 | struct scatterlist *sg; |
59 | int i; | 59 | int i; |
@@ -67,8 +67,7 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
67 | } | 67 | } |
68 | 68 | ||
69 | static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 69 | static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
70 | int nents, enum dma_data_direction dir, | 70 | int nents, enum dma_data_direction dir, unsigned long attrs) |
71 | struct dma_attrs *attrs) | ||
72 | { | 71 | { |
73 | struct scatterlist *sg; | 72 | struct scatterlist *sg; |
74 | int i; | 73 | int i; |
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c index f7ee63af2541..95e38ad27c69 100644 --- a/arch/c6x/mm/dma-coherent.c +++ b/arch/c6x/mm/dma-coherent.c | |||
@@ -74,7 +74,7 @@ static void __free_dma_pages(u32 addr, int order) | |||
74 | * virtual and DMA address for that space. | 74 | * virtual and DMA address for that space. |
75 | */ | 75 | */ |
76 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 76 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
77 | gfp_t gfp, struct dma_attrs *attrs) | 77 | gfp_t gfp, unsigned long attrs) |
78 | { | 78 | { |
79 | u32 paddr; | 79 | u32 paddr; |
80 | int order; | 80 | int order; |
@@ -99,7 +99,7 @@ void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
99 | * Free DMA coherent memory as defined by the above mapping. | 99 | * Free DMA coherent memory as defined by the above mapping. |
100 | */ | 100 | */ |
101 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, | 101 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, |
102 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 102 | dma_addr_t dma_handle, unsigned long attrs) |
103 | { | 103 | { |
104 | int order; | 104 | int order; |
105 | 105 | ||
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c index 8d5efa58cce1..1f0636793f0c 100644 --- a/arch/cris/arch-v32/drivers/pci/dma.c +++ b/arch/cris/arch-v32/drivers/pci/dma.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | static void *v32_dma_alloc(struct device *dev, size_t size, | 19 | static void *v32_dma_alloc(struct device *dev, size_t size, |
20 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 20 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
21 | { | 21 | { |
22 | void *ret; | 22 | void *ret; |
23 | 23 | ||
@@ -37,22 +37,21 @@ static void *v32_dma_alloc(struct device *dev, size_t size, | |||
37 | } | 37 | } |
38 | 38 | ||
39 | static void v32_dma_free(struct device *dev, size_t size, void *vaddr, | 39 | static void v32_dma_free(struct device *dev, size_t size, void *vaddr, |
40 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 40 | dma_addr_t dma_handle, unsigned long attrs) |
41 | { | 41 | { |
42 | free_pages((unsigned long)vaddr, get_order(size)); | 42 | free_pages((unsigned long)vaddr, get_order(size)); |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline dma_addr_t v32_dma_map_page(struct device *dev, | 45 | static inline dma_addr_t v32_dma_map_page(struct device *dev, |
46 | struct page *page, unsigned long offset, size_t size, | 46 | struct page *page, unsigned long offset, size_t size, |
47 | enum dma_data_direction direction, | 47 | enum dma_data_direction direction, unsigned long attrs) |
48 | struct dma_attrs *attrs) | ||
49 | { | 48 | { |
50 | return page_to_phys(page) + offset; | 49 | return page_to_phys(page) + offset; |
51 | } | 50 | } |
52 | 51 | ||
53 | static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg, | 52 | static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg, |
54 | int nents, enum dma_data_direction direction, | 53 | int nents, enum dma_data_direction direction, |
55 | struct dma_attrs *attrs) | 54 | unsigned long attrs) |
56 | { | 55 | { |
57 | printk("Map sg\n"); | 56 | printk("Map sg\n"); |
58 | return nents; | 57 | return nents; |
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c index 082be49b5df0..90f2e4cb33d6 100644 --- a/arch/frv/mb93090-mb00/pci-dma-nommu.c +++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c | |||
@@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(dma_alloc_lock); | |||
35 | static LIST_HEAD(dma_alloc_list); | 35 | static LIST_HEAD(dma_alloc_list); |
36 | 36 | ||
37 | static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, | 37 | static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, |
38 | gfp_t gfp, struct dma_attrs *attrs) | 38 | gfp_t gfp, unsigned long attrs) |
39 | { | 39 | { |
40 | struct dma_alloc_record *new; | 40 | struct dma_alloc_record *new; |
41 | struct list_head *this = &dma_alloc_list; | 41 | struct list_head *this = &dma_alloc_list; |
@@ -86,7 +86,7 @@ static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_ha | |||
86 | } | 86 | } |
87 | 87 | ||
88 | static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, | 88 | static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, |
89 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 89 | dma_addr_t dma_handle, unsigned long attrs) |
90 | { | 90 | { |
91 | struct dma_alloc_record *rec; | 91 | struct dma_alloc_record *rec; |
92 | unsigned long flags; | 92 | unsigned long flags; |
@@ -107,7 +107,7 @@ static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, | |||
107 | 107 | ||
108 | static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 108 | static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
109 | int nents, enum dma_data_direction direction, | 109 | int nents, enum dma_data_direction direction, |
110 | struct dma_attrs *attrs) | 110 | unsigned long attrs) |
111 | { | 111 | { |
112 | int i; | 112 | int i; |
113 | struct scatterlist *sg; | 113 | struct scatterlist *sg; |
@@ -124,7 +124,7 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
124 | 124 | ||
125 | static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, | 125 | static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, |
126 | unsigned long offset, size_t size, | 126 | unsigned long offset, size_t size, |
127 | enum dma_data_direction direction, struct dma_attrs *attrs) | 127 | enum dma_data_direction direction, unsigned long attrs) |
128 | { | 128 | { |
129 | BUG_ON(direction == DMA_NONE); | 129 | BUG_ON(direction == DMA_NONE); |
130 | flush_dcache_page(page); | 130 | flush_dcache_page(page); |
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c index 316b7b65348d..f585745b1abc 100644 --- a/arch/frv/mb93090-mb00/pci-dma.c +++ b/arch/frv/mb93090-mb00/pci-dma.c | |||
@@ -19,8 +19,7 @@ | |||
19 | #include <asm/io.h> | 19 | #include <asm/io.h> |
20 | 20 | ||
21 | static void *frv_dma_alloc(struct device *hwdev, size_t size, | 21 | static void *frv_dma_alloc(struct device *hwdev, size_t size, |
22 | dma_addr_t *dma_handle, gfp_t gfp, | 22 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
23 | struct dma_attrs *attrs) | ||
24 | { | 23 | { |
25 | void *ret; | 24 | void *ret; |
26 | 25 | ||
@@ -32,14 +31,14 @@ static void *frv_dma_alloc(struct device *hwdev, size_t size, | |||
32 | } | 31 | } |
33 | 32 | ||
34 | static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, | 33 | static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, |
35 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 34 | dma_addr_t dma_handle, unsigned long attrs) |
36 | { | 35 | { |
37 | consistent_free(vaddr); | 36 | consistent_free(vaddr); |
38 | } | 37 | } |
39 | 38 | ||
40 | static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 39 | static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
41 | int nents, enum dma_data_direction direction, | 40 | int nents, enum dma_data_direction direction, |
42 | struct dma_attrs *attrs) | 41 | unsigned long attrs) |
43 | { | 42 | { |
44 | unsigned long dampr2; | 43 | unsigned long dampr2; |
45 | void *vaddr; | 44 | void *vaddr; |
@@ -69,7 +68,7 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
69 | 68 | ||
70 | static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, | 69 | static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, |
71 | unsigned long offset, size_t size, | 70 | unsigned long offset, size_t size, |
72 | enum dma_data_direction direction, struct dma_attrs *attrs) | 71 | enum dma_data_direction direction, unsigned long attrs) |
73 | { | 72 | { |
74 | flush_dcache_page(page); | 73 | flush_dcache_page(page); |
75 | return (dma_addr_t) page_to_phys(page) + offset; | 74 | return (dma_addr_t) page_to_phys(page) + offset; |
diff --git a/arch/h8300/kernel/dma.c b/arch/h8300/kernel/dma.c index eeb13d3f2424..3651da045806 100644 --- a/arch/h8300/kernel/dma.c +++ b/arch/h8300/kernel/dma.c | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | static void *dma_alloc(struct device *dev, size_t size, | 13 | static void *dma_alloc(struct device *dev, size_t size, |
14 | dma_addr_t *dma_handle, gfp_t gfp, | 14 | dma_addr_t *dma_handle, gfp_t gfp, |
15 | struct dma_attrs *attrs) | 15 | unsigned long attrs) |
16 | { | 16 | { |
17 | void *ret; | 17 | void *ret; |
18 | 18 | ||
@@ -32,7 +32,7 @@ static void *dma_alloc(struct device *dev, size_t size, | |||
32 | 32 | ||
33 | static void dma_free(struct device *dev, size_t size, | 33 | static void dma_free(struct device *dev, size_t size, |
34 | void *vaddr, dma_addr_t dma_handle, | 34 | void *vaddr, dma_addr_t dma_handle, |
35 | struct dma_attrs *attrs) | 35 | unsigned long attrs) |
36 | 36 | ||
37 | { | 37 | { |
38 | free_pages((unsigned long)vaddr, get_order(size)); | 38 | free_pages((unsigned long)vaddr, get_order(size)); |
@@ -41,14 +41,14 @@ static void dma_free(struct device *dev, size_t size, | |||
41 | static dma_addr_t map_page(struct device *dev, struct page *page, | 41 | static dma_addr_t map_page(struct device *dev, struct page *page, |
42 | unsigned long offset, size_t size, | 42 | unsigned long offset, size_t size, |
43 | enum dma_data_direction direction, | 43 | enum dma_data_direction direction, |
44 | struct dma_attrs *attrs) | 44 | unsigned long attrs) |
45 | { | 45 | { |
46 | return page_to_phys(page) + offset; | 46 | return page_to_phys(page) + offset; |
47 | } | 47 | } |
48 | 48 | ||
49 | static int map_sg(struct device *dev, struct scatterlist *sgl, | 49 | static int map_sg(struct device *dev, struct scatterlist *sgl, |
50 | int nents, enum dma_data_direction direction, | 50 | int nents, enum dma_data_direction direction, |
51 | struct dma_attrs *attrs) | 51 | unsigned long attrs) |
52 | { | 52 | { |
53 | struct scatterlist *sg; | 53 | struct scatterlist *sg; |
54 | int i; | 54 | int i; |
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index aa6203464520..7ef58df909fc 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
27 | #include <linux/scatterlist.h> | 27 | #include <linux/scatterlist.h> |
28 | #include <linux/dma-debug.h> | 28 | #include <linux/dma-debug.h> |
29 | #include <linux/dma-attrs.h> | ||
30 | #include <asm/io.h> | 29 | #include <asm/io.h> |
31 | 30 | ||
32 | struct device; | 31 | struct device; |
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index 9e3ddf792bd3..b9017785fb71 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c | |||
@@ -51,7 +51,7 @@ static struct gen_pool *coherent_pool; | |||
51 | 51 | ||
52 | static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, | 52 | static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, |
53 | dma_addr_t *dma_addr, gfp_t flag, | 53 | dma_addr_t *dma_addr, gfp_t flag, |
54 | struct dma_attrs *attrs) | 54 | unsigned long attrs) |
55 | { | 55 | { |
56 | void *ret; | 56 | void *ret; |
57 | 57 | ||
@@ -84,7 +84,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, | |||
84 | } | 84 | } |
85 | 85 | ||
86 | static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr, | 86 | static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr, |
87 | dma_addr_t dma_addr, struct dma_attrs *attrs) | 87 | dma_addr_t dma_addr, unsigned long attrs) |
88 | { | 88 | { |
89 | gen_pool_free(coherent_pool, (unsigned long) vaddr, size); | 89 | gen_pool_free(coherent_pool, (unsigned long) vaddr, size); |
90 | } | 90 | } |
@@ -105,7 +105,7 @@ static int check_addr(const char *name, struct device *hwdev, | |||
105 | 105 | ||
106 | static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg, | 106 | static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg, |
107 | int nents, enum dma_data_direction dir, | 107 | int nents, enum dma_data_direction dir, |
108 | struct dma_attrs *attrs) | 108 | unsigned long attrs) |
109 | { | 109 | { |
110 | struct scatterlist *s; | 110 | struct scatterlist *s; |
111 | int i; | 111 | int i; |
@@ -172,7 +172,7 @@ static inline void dma_sync(void *addr, size_t size, | |||
172 | static dma_addr_t hexagon_map_page(struct device *dev, struct page *page, | 172 | static dma_addr_t hexagon_map_page(struct device *dev, struct page *page, |
173 | unsigned long offset, size_t size, | 173 | unsigned long offset, size_t size, |
174 | enum dma_data_direction dir, | 174 | enum dma_data_direction dir, |
175 | struct dma_attrs *attrs) | 175 | unsigned long attrs) |
176 | { | 176 | { |
177 | dma_addr_t bus = page_to_phys(page) + offset; | 177 | dma_addr_t bus = page_to_phys(page) + offset; |
178 | WARN_ON(size == 0); | 178 | WARN_ON(size == 0); |
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index a6d6190c9d24..630ee8073899 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c | |||
@@ -919,7 +919,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) | |||
919 | static dma_addr_t sba_map_page(struct device *dev, struct page *page, | 919 | static dma_addr_t sba_map_page(struct device *dev, struct page *page, |
920 | unsigned long poff, size_t size, | 920 | unsigned long poff, size_t size, |
921 | enum dma_data_direction dir, | 921 | enum dma_data_direction dir, |
922 | struct dma_attrs *attrs) | 922 | unsigned long attrs) |
923 | { | 923 | { |
924 | struct ioc *ioc; | 924 | struct ioc *ioc; |
925 | void *addr = page_address(page) + poff; | 925 | void *addr = page_address(page) + poff; |
@@ -1005,7 +1005,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, | |||
1005 | 1005 | ||
1006 | static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, | 1006 | static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, |
1007 | size_t size, enum dma_data_direction dir, | 1007 | size_t size, enum dma_data_direction dir, |
1008 | struct dma_attrs *attrs) | 1008 | unsigned long attrs) |
1009 | { | 1009 | { |
1010 | return sba_map_page(dev, virt_to_page(addr), | 1010 | return sba_map_page(dev, virt_to_page(addr), |
1011 | (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); | 1011 | (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); |
@@ -1046,7 +1046,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) | |||
1046 | * See Documentation/DMA-API-HOWTO.txt | 1046 | * See Documentation/DMA-API-HOWTO.txt |
1047 | */ | 1047 | */ |
1048 | static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, | 1048 | static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, |
1049 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1049 | enum dma_data_direction dir, unsigned long attrs) |
1050 | { | 1050 | { |
1051 | struct ioc *ioc; | 1051 | struct ioc *ioc; |
1052 | #if DELAYED_RESOURCE_CNT > 0 | 1052 | #if DELAYED_RESOURCE_CNT > 0 |
@@ -1115,7 +1115,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, | |||
1115 | } | 1115 | } |
1116 | 1116 | ||
1117 | void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, | 1117 | void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, |
1118 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1118 | enum dma_data_direction dir, unsigned long attrs) |
1119 | { | 1119 | { |
1120 | sba_unmap_page(dev, iova, size, dir, attrs); | 1120 | sba_unmap_page(dev, iova, size, dir, attrs); |
1121 | } | 1121 | } |
@@ -1130,7 +1130,7 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, | |||
1130 | */ | 1130 | */ |
1131 | static void * | 1131 | static void * |
1132 | sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | 1132 | sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, |
1133 | gfp_t flags, struct dma_attrs *attrs) | 1133 | gfp_t flags, unsigned long attrs) |
1134 | { | 1134 | { |
1135 | struct ioc *ioc; | 1135 | struct ioc *ioc; |
1136 | void *addr; | 1136 | void *addr; |
@@ -1175,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
1175 | * device to map single to get an iova mapping. | 1175 | * device to map single to get an iova mapping. |
1176 | */ | 1176 | */ |
1177 | *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, | 1177 | *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, |
1178 | size, 0, NULL); | 1178 | size, 0, 0); |
1179 | 1179 | ||
1180 | return addr; | 1180 | return addr; |
1181 | } | 1181 | } |
@@ -1191,9 +1191,9 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
1191 | * See Documentation/DMA-API-HOWTO.txt | 1191 | * See Documentation/DMA-API-HOWTO.txt |
1192 | */ | 1192 | */ |
1193 | static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, | 1193 | static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, |
1194 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 1194 | dma_addr_t dma_handle, unsigned long attrs) |
1195 | { | 1195 | { |
1196 | sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); | 1196 | sba_unmap_single_attrs(dev, dma_handle, size, 0, 0); |
1197 | free_pages((unsigned long) vaddr, get_order(size)); | 1197 | free_pages((unsigned long) vaddr, get_order(size)); |
1198 | } | 1198 | } |
1199 | 1199 | ||
@@ -1442,7 +1442,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, | |||
1442 | 1442 | ||
1443 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | 1443 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, |
1444 | int nents, enum dma_data_direction dir, | 1444 | int nents, enum dma_data_direction dir, |
1445 | struct dma_attrs *attrs); | 1445 | unsigned long attrs); |
1446 | /** | 1446 | /** |
1447 | * sba_map_sg - map Scatter/Gather list | 1447 | * sba_map_sg - map Scatter/Gather list |
1448 | * @dev: instance of PCI owned by the driver that's asking. | 1448 | * @dev: instance of PCI owned by the driver that's asking. |
@@ -1455,7 +1455,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | |||
1455 | */ | 1455 | */ |
1456 | static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, | 1456 | static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, |
1457 | int nents, enum dma_data_direction dir, | 1457 | int nents, enum dma_data_direction dir, |
1458 | struct dma_attrs *attrs) | 1458 | unsigned long attrs) |
1459 | { | 1459 | { |
1460 | struct ioc *ioc; | 1460 | struct ioc *ioc; |
1461 | int coalesced, filled = 0; | 1461 | int coalesced, filled = 0; |
@@ -1551,7 +1551,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, | |||
1551 | */ | 1551 | */ |
1552 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | 1552 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, |
1553 | int nents, enum dma_data_direction dir, | 1553 | int nents, enum dma_data_direction dir, |
1554 | struct dma_attrs *attrs) | 1554 | unsigned long attrs) |
1555 | { | 1555 | { |
1556 | #ifdef ASSERT_PDIR_SANITY | 1556 | #ifdef ASSERT_PDIR_SANITY |
1557 | struct ioc *ioc; | 1557 | struct ioc *ioc; |
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index 9c39bdfc2da8..ed7f09089f12 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h | |||
@@ -22,7 +22,6 @@ struct pci_bus; | |||
22 | struct task_struct; | 22 | struct task_struct; |
23 | struct pci_dev; | 23 | struct pci_dev; |
24 | struct msi_desc; | 24 | struct msi_desc; |
25 | struct dma_attrs; | ||
26 | 25 | ||
27 | typedef void ia64_mv_setup_t (char **); | 26 | typedef void ia64_mv_setup_t (char **); |
28 | typedef void ia64_mv_cpu_init_t (void); | 27 | typedef void ia64_mv_cpu_init_t (void); |
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 939260aeac98..2933208c0285 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c | |||
@@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb); | |||
16 | 16 | ||
17 | static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, | 17 | static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, |
18 | dma_addr_t *dma_handle, gfp_t gfp, | 18 | dma_addr_t *dma_handle, gfp_t gfp, |
19 | struct dma_attrs *attrs) | 19 | unsigned long attrs) |
20 | { | 20 | { |
21 | if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) | 21 | if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) |
22 | gfp |= GFP_DMA; | 22 | gfp |= GFP_DMA; |
@@ -25,7 +25,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, | |||
25 | 25 | ||
26 | static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, | 26 | static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, |
27 | void *vaddr, dma_addr_t dma_addr, | 27 | void *vaddr, dma_addr_t dma_addr, |
28 | struct dma_attrs *attrs) | 28 | unsigned long attrs) |
29 | { | 29 | { |
30 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); | 30 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); |
31 | } | 31 | } |
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 8f59907007cb..74c934a997bb 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(sn_dma_set_mask); | |||
77 | */ | 77 | */ |
78 | static void *sn_dma_alloc_coherent(struct device *dev, size_t size, | 78 | static void *sn_dma_alloc_coherent(struct device *dev, size_t size, |
79 | dma_addr_t * dma_handle, gfp_t flags, | 79 | dma_addr_t * dma_handle, gfp_t flags, |
80 | struct dma_attrs *attrs) | 80 | unsigned long attrs) |
81 | { | 81 | { |
82 | void *cpuaddr; | 82 | void *cpuaddr; |
83 | unsigned long phys_addr; | 83 | unsigned long phys_addr; |
@@ -138,7 +138,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, | |||
138 | * any associated IOMMU mappings. | 138 | * any associated IOMMU mappings. |
139 | */ | 139 | */ |
140 | static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | 140 | static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, |
141 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 141 | dma_addr_t dma_handle, unsigned long attrs) |
142 | { | 142 | { |
143 | struct pci_dev *pdev = to_pci_dev(dev); | 143 | struct pci_dev *pdev = to_pci_dev(dev); |
144 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 144 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
@@ -176,21 +176,18 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr | |||
176 | static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, | 176 | static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, |
177 | unsigned long offset, size_t size, | 177 | unsigned long offset, size_t size, |
178 | enum dma_data_direction dir, | 178 | enum dma_data_direction dir, |
179 | struct dma_attrs *attrs) | 179 | unsigned long attrs) |
180 | { | 180 | { |
181 | void *cpu_addr = page_address(page) + offset; | 181 | void *cpu_addr = page_address(page) + offset; |
182 | dma_addr_t dma_addr; | 182 | dma_addr_t dma_addr; |
183 | unsigned long phys_addr; | 183 | unsigned long phys_addr; |
184 | struct pci_dev *pdev = to_pci_dev(dev); | 184 | struct pci_dev *pdev = to_pci_dev(dev); |
185 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 185 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
186 | int dmabarr; | ||
187 | |||
188 | dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); | ||
189 | 186 | ||
190 | BUG_ON(!dev_is_pci(dev)); | 187 | BUG_ON(!dev_is_pci(dev)); |
191 | 188 | ||
192 | phys_addr = __pa(cpu_addr); | 189 | phys_addr = __pa(cpu_addr); |
193 | if (dmabarr) | 190 | if (attrs & DMA_ATTR_WRITE_BARRIER) |
194 | dma_addr = provider->dma_map_consistent(pdev, phys_addr, | 191 | dma_addr = provider->dma_map_consistent(pdev, phys_addr, |
195 | size, SN_DMA_ADDR_PHYS); | 192 | size, SN_DMA_ADDR_PHYS); |
196 | else | 193 | else |
@@ -218,7 +215,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, | |||
218 | */ | 215 | */ |
219 | static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | 216 | static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
220 | size_t size, enum dma_data_direction dir, | 217 | size_t size, enum dma_data_direction dir, |
221 | struct dma_attrs *attrs) | 218 | unsigned long attrs) |
222 | { | 219 | { |
223 | struct pci_dev *pdev = to_pci_dev(dev); | 220 | struct pci_dev *pdev = to_pci_dev(dev); |
224 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 221 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
@@ -240,7 +237,7 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
240 | */ | 237 | */ |
241 | static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | 238 | static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, |
242 | int nhwentries, enum dma_data_direction dir, | 239 | int nhwentries, enum dma_data_direction dir, |
243 | struct dma_attrs *attrs) | 240 | unsigned long attrs) |
244 | { | 241 | { |
245 | int i; | 242 | int i; |
246 | struct pci_dev *pdev = to_pci_dev(dev); | 243 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -273,16 +270,13 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | |||
273 | */ | 270 | */ |
274 | static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, | 271 | static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, |
275 | int nhwentries, enum dma_data_direction dir, | 272 | int nhwentries, enum dma_data_direction dir, |
276 | struct dma_attrs *attrs) | 273 | unsigned long attrs) |
277 | { | 274 | { |
278 | unsigned long phys_addr; | 275 | unsigned long phys_addr; |
279 | struct scatterlist *saved_sg = sgl, *sg; | 276 | struct scatterlist *saved_sg = sgl, *sg; |
280 | struct pci_dev *pdev = to_pci_dev(dev); | 277 | struct pci_dev *pdev = to_pci_dev(dev); |
281 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 278 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
282 | int i; | 279 | int i; |
283 | int dmabarr; | ||
284 | |||
285 | dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); | ||
286 | 280 | ||
287 | BUG_ON(!dev_is_pci(dev)); | 281 | BUG_ON(!dev_is_pci(dev)); |
288 | 282 | ||
@@ -292,7 +286,7 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, | |||
292 | for_each_sg(sgl, sg, nhwentries, i) { | 286 | for_each_sg(sgl, sg, nhwentries, i) { |
293 | dma_addr_t dma_addr; | 287 | dma_addr_t dma_addr; |
294 | phys_addr = SG_ENT_PHYS_ADDRESS(sg); | 288 | phys_addr = SG_ENT_PHYS_ADDRESS(sg); |
295 | if (dmabarr) | 289 | if (attrs & DMA_ATTR_WRITE_BARRIER) |
296 | dma_addr = provider->dma_map_consistent(pdev, | 290 | dma_addr = provider->dma_map_consistent(pdev, |
297 | phys_addr, | 291 | phys_addr, |
298 | sg->length, | 292 | sg->length, |
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index cbc78b4117b5..8cf97cbadc91 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) | 19 | #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) |
20 | 20 | ||
21 | static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 21 | static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
22 | gfp_t flag, struct dma_attrs *attrs) | 22 | gfp_t flag, unsigned long attrs) |
23 | { | 23 | { |
24 | struct page *page, **map; | 24 | struct page *page, **map; |
25 | pgprot_t pgprot; | 25 | pgprot_t pgprot; |
@@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
62 | } | 62 | } |
63 | 63 | ||
64 | static void m68k_dma_free(struct device *dev, size_t size, void *addr, | 64 | static void m68k_dma_free(struct device *dev, size_t size, void *addr, |
65 | dma_addr_t handle, struct dma_attrs *attrs) | 65 | dma_addr_t handle, unsigned long attrs) |
66 | { | 66 | { |
67 | pr_debug("dma_free_coherent: %p, %x\n", addr, handle); | 67 | pr_debug("dma_free_coherent: %p, %x\n", addr, handle); |
68 | vfree(addr); | 68 | vfree(addr); |
@@ -73,7 +73,7 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr, | |||
73 | #include <asm/cacheflush.h> | 73 | #include <asm/cacheflush.h> |
74 | 74 | ||
75 | static void *m68k_dma_alloc(struct device *dev, size_t size, | 75 | static void *m68k_dma_alloc(struct device *dev, size_t size, |
76 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 76 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
77 | { | 77 | { |
78 | void *ret; | 78 | void *ret; |
79 | /* ignore region specifiers */ | 79 | /* ignore region specifiers */ |
@@ -91,7 +91,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, | |||
91 | } | 91 | } |
92 | 92 | ||
93 | static void m68k_dma_free(struct device *dev, size_t size, void *vaddr, | 93 | static void m68k_dma_free(struct device *dev, size_t size, void *vaddr, |
94 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 94 | dma_addr_t dma_handle, unsigned long attrs) |
95 | { | 95 | { |
96 | free_pages((unsigned long)vaddr, get_order(size)); | 96 | free_pages((unsigned long)vaddr, get_order(size)); |
97 | } | 97 | } |
@@ -130,7 +130,7 @@ static void m68k_dma_sync_sg_for_device(struct device *dev, | |||
130 | 130 | ||
131 | static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, | 131 | static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, |
132 | unsigned long offset, size_t size, enum dma_data_direction dir, | 132 | unsigned long offset, size_t size, enum dma_data_direction dir, |
133 | struct dma_attrs *attrs) | 133 | unsigned long attrs) |
134 | { | 134 | { |
135 | dma_addr_t handle = page_to_phys(page) + offset; | 135 | dma_addr_t handle = page_to_phys(page) + offset; |
136 | 136 | ||
@@ -139,7 +139,7 @@ static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, | |||
139 | } | 139 | } |
140 | 140 | ||
141 | static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 141 | static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
142 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 142 | int nents, enum dma_data_direction dir, unsigned long attrs) |
143 | { | 143 | { |
144 | int i; | 144 | int i; |
145 | struct scatterlist *sg; | 145 | struct scatterlist *sg; |
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index e12368d02155..0db31e24c541 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c | |||
@@ -172,7 +172,7 @@ out: | |||
172 | * virtual and bus address for that space. | 172 | * virtual and bus address for that space. |
173 | */ | 173 | */ |
174 | static void *metag_dma_alloc(struct device *dev, size_t size, | 174 | static void *metag_dma_alloc(struct device *dev, size_t size, |
175 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 175 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
176 | { | 176 | { |
177 | struct page *page; | 177 | struct page *page; |
178 | struct metag_vm_region *c; | 178 | struct metag_vm_region *c; |
@@ -268,7 +268,7 @@ no_page: | |||
268 | * free a page as defined by the above mapping. | 268 | * free a page as defined by the above mapping. |
269 | */ | 269 | */ |
270 | static void metag_dma_free(struct device *dev, size_t size, void *vaddr, | 270 | static void metag_dma_free(struct device *dev, size_t size, void *vaddr, |
271 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 271 | dma_addr_t dma_handle, unsigned long attrs) |
272 | { | 272 | { |
273 | struct metag_vm_region *c; | 273 | struct metag_vm_region *c; |
274 | unsigned long flags, addr; | 274 | unsigned long flags, addr; |
@@ -331,13 +331,13 @@ no_area: | |||
331 | 331 | ||
332 | static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 332 | static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
333 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 333 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
334 | struct dma_attrs *attrs) | 334 | unsigned long attrs) |
335 | { | 335 | { |
336 | unsigned long flags, user_size, kern_size; | 336 | unsigned long flags, user_size, kern_size; |
337 | struct metag_vm_region *c; | 337 | struct metag_vm_region *c; |
338 | int ret = -ENXIO; | 338 | int ret = -ENXIO; |
339 | 339 | ||
340 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) | 340 | if (attrs & DMA_ATTR_WRITE_COMBINE) |
341 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | 341 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
342 | else | 342 | else |
343 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 343 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
@@ -482,7 +482,7 @@ static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction) | |||
482 | 482 | ||
483 | static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, | 483 | static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, |
484 | unsigned long offset, size_t size, | 484 | unsigned long offset, size_t size, |
485 | enum dma_data_direction direction, struct dma_attrs *attrs) | 485 | enum dma_data_direction direction, unsigned long attrs) |
486 | { | 486 | { |
487 | dma_sync_for_device((void *)(page_to_phys(page) + offset), size, | 487 | dma_sync_for_device((void *)(page_to_phys(page) + offset), size, |
488 | direction); | 488 | direction); |
@@ -491,14 +491,14 @@ static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, | |||
491 | 491 | ||
492 | static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 492 | static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
493 | size_t size, enum dma_data_direction direction, | 493 | size_t size, enum dma_data_direction direction, |
494 | struct dma_attrs *attrs) | 494 | unsigned long attrs) |
495 | { | 495 | { |
496 | dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); | 496 | dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); |
497 | } | 497 | } |
498 | 498 | ||
499 | static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 499 | static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
500 | int nents, enum dma_data_direction direction, | 500 | int nents, enum dma_data_direction direction, |
501 | struct dma_attrs *attrs) | 501 | unsigned long attrs) |
502 | { | 502 | { |
503 | struct scatterlist *sg; | 503 | struct scatterlist *sg; |
504 | int i; | 504 | int i; |
@@ -516,7 +516,7 @@ static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
516 | 516 | ||
517 | static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 517 | static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
518 | int nhwentries, enum dma_data_direction direction, | 518 | int nhwentries, enum dma_data_direction direction, |
519 | struct dma_attrs *attrs) | 519 | unsigned long attrs) |
520 | { | 520 | { |
521 | struct scatterlist *sg; | 521 | struct scatterlist *sg; |
522 | int i; | 522 | int i; |
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 1884783d15c0..1768d4bdc8d3 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
27 | #include <linux/dma-debug.h> | 27 | #include <linux/dma-debug.h> |
28 | #include <linux/dma-attrs.h> | ||
29 | #include <asm/io.h> | 28 | #include <asm/io.h> |
30 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
31 | 30 | ||
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index bf4dec229437..ec04dc1e2527 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | static void *dma_direct_alloc_coherent(struct device *dev, size_t size, | 18 | static void *dma_direct_alloc_coherent(struct device *dev, size_t size, |
19 | dma_addr_t *dma_handle, gfp_t flag, | 19 | dma_addr_t *dma_handle, gfp_t flag, |
20 | struct dma_attrs *attrs) | 20 | unsigned long attrs) |
21 | { | 21 | { |
22 | #ifdef NOT_COHERENT_CACHE | 22 | #ifdef NOT_COHERENT_CACHE |
23 | return consistent_alloc(flag, size, dma_handle); | 23 | return consistent_alloc(flag, size, dma_handle); |
@@ -42,7 +42,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size, | |||
42 | 42 | ||
43 | static void dma_direct_free_coherent(struct device *dev, size_t size, | 43 | static void dma_direct_free_coherent(struct device *dev, size_t size, |
44 | void *vaddr, dma_addr_t dma_handle, | 44 | void *vaddr, dma_addr_t dma_handle, |
45 | struct dma_attrs *attrs) | 45 | unsigned long attrs) |
46 | { | 46 | { |
47 | #ifdef NOT_COHERENT_CACHE | 47 | #ifdef NOT_COHERENT_CACHE |
48 | consistent_free(size, vaddr); | 48 | consistent_free(size, vaddr); |
@@ -53,7 +53,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size, | |||
53 | 53 | ||
54 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | 54 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, |
55 | int nents, enum dma_data_direction direction, | 55 | int nents, enum dma_data_direction direction, |
56 | struct dma_attrs *attrs) | 56 | unsigned long attrs) |
57 | { | 57 | { |
58 | struct scatterlist *sg; | 58 | struct scatterlist *sg; |
59 | int i; | 59 | int i; |
@@ -78,7 +78,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, | |||
78 | unsigned long offset, | 78 | unsigned long offset, |
79 | size_t size, | 79 | size_t size, |
80 | enum dma_data_direction direction, | 80 | enum dma_data_direction direction, |
81 | struct dma_attrs *attrs) | 81 | unsigned long attrs) |
82 | { | 82 | { |
83 | __dma_sync(page_to_phys(page) + offset, size, direction); | 83 | __dma_sync(page_to_phys(page) + offset, size, direction); |
84 | return page_to_phys(page) + offset; | 84 | return page_to_phys(page) + offset; |
@@ -88,7 +88,7 @@ static inline void dma_direct_unmap_page(struct device *dev, | |||
88 | dma_addr_t dma_address, | 88 | dma_addr_t dma_address, |
89 | size_t size, | 89 | size_t size, |
90 | enum dma_data_direction direction, | 90 | enum dma_data_direction direction, |
91 | struct dma_attrs *attrs) | 91 | unsigned long attrs) |
92 | { | 92 | { |
93 | /* There is not necessary to do cache cleanup | 93 | /* There is not necessary to do cache cleanup |
94 | * | 94 | * |
@@ -157,7 +157,7 @@ dma_direct_sync_sg_for_device(struct device *dev, | |||
157 | static | 157 | static |
158 | int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | 158 | int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, |
159 | void *cpu_addr, dma_addr_t handle, size_t size, | 159 | void *cpu_addr, dma_addr_t handle, size_t size, |
160 | struct dma_attrs *attrs) | 160 | unsigned long attrs) |
161 | { | 161 | { |
162 | #ifdef CONFIG_MMU | 162 | #ifdef CONFIG_MMU |
163 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 163 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 2cd45f5f9481..fd69528b24fb 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c | |||
@@ -125,7 +125,7 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev, | |||
125 | 125 | ||
126 | static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page, | 126 | static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page, |
127 | unsigned long offset, size_t size, enum dma_data_direction direction, | 127 | unsigned long offset, size_t size, enum dma_data_direction direction, |
128 | struct dma_attrs *attrs) | 128 | unsigned long attrs) |
129 | { | 129 | { |
130 | dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, | 130 | dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, |
131 | direction, attrs); | 131 | direction, attrs); |
@@ -135,7 +135,7 @@ static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page, | |||
135 | } | 135 | } |
136 | 136 | ||
137 | static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg, | 137 | static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg, |
138 | int nents, enum dma_data_direction direction, struct dma_attrs *attrs) | 138 | int nents, enum dma_data_direction direction, unsigned long attrs) |
139 | { | 139 | { |
140 | int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs); | 140 | int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs); |
141 | mb(); | 141 | mb(); |
@@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev, | |||
157 | } | 157 | } |
158 | 158 | ||
159 | static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, | 159 | static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, |
160 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 160 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
161 | { | 161 | { |
162 | void *ret; | 162 | void *ret; |
163 | 163 | ||
@@ -189,7 +189,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, | |||
189 | } | 189 | } |
190 | 190 | ||
191 | static void octeon_dma_free_coherent(struct device *dev, size_t size, | 191 | static void octeon_dma_free_coherent(struct device *dev, size_t size, |
192 | void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) | 192 | void *vaddr, dma_addr_t dma_handle, unsigned long attrs) |
193 | { | 193 | { |
194 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); | 194 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); |
195 | } | 195 | } |
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c index 4ffa6fc81c8f..1a80b6f73ab2 100644 --- a/arch/mips/loongson64/common/dma-swiotlb.c +++ b/arch/mips/loongson64/common/dma-swiotlb.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <dma-coherence.h> | 10 | #include <dma-coherence.h> |
11 | 11 | ||
12 | static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, | 12 | static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, |
13 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 13 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
14 | { | 14 | { |
15 | void *ret; | 15 | void *ret; |
16 | 16 | ||
@@ -41,7 +41,7 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, | |||
41 | } | 41 | } |
42 | 42 | ||
43 | static void loongson_dma_free_coherent(struct device *dev, size_t size, | 43 | static void loongson_dma_free_coherent(struct device *dev, size_t size, |
44 | void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) | 44 | void *vaddr, dma_addr_t dma_handle, unsigned long attrs) |
45 | { | 45 | { |
46 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); | 46 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); |
47 | } | 47 | } |
@@ -49,7 +49,7 @@ static void loongson_dma_free_coherent(struct device *dev, size_t size, | |||
49 | static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page, | 49 | static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page, |
50 | unsigned long offset, size_t size, | 50 | unsigned long offset, size_t size, |
51 | enum dma_data_direction dir, | 51 | enum dma_data_direction dir, |
52 | struct dma_attrs *attrs) | 52 | unsigned long attrs) |
53 | { | 53 | { |
54 | dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, | 54 | dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, |
55 | dir, attrs); | 55 | dir, attrs); |
@@ -59,9 +59,9 @@ static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page, | |||
59 | 59 | ||
60 | static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg, | 60 | static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg, |
61 | int nents, enum dma_data_direction dir, | 61 | int nents, enum dma_data_direction dir, |
62 | struct dma_attrs *attrs) | 62 | unsigned long attrs) |
63 | { | 63 | { |
64 | int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL); | 64 | int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0); |
65 | mb(); | 65 | mb(); |
66 | 66 | ||
67 | return r; | 67 | return r; |
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index cb557d28cb21..b2eadd6fa9a1 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c | |||
@@ -131,7 +131,7 @@ static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size, | |||
131 | } | 131 | } |
132 | 132 | ||
133 | static void *mips_dma_alloc_coherent(struct device *dev, size_t size, | 133 | static void *mips_dma_alloc_coherent(struct device *dev, size_t size, |
134 | dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 134 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
135 | { | 135 | { |
136 | void *ret; | 136 | void *ret; |
137 | struct page *page = NULL; | 137 | struct page *page = NULL; |
@@ -141,7 +141,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, | |||
141 | * XXX: seems like the coherent and non-coherent implementations could | 141 | * XXX: seems like the coherent and non-coherent implementations could |
142 | * be consolidated. | 142 | * be consolidated. |
143 | */ | 143 | */ |
144 | if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) | 144 | if (attrs & DMA_ATTR_NON_CONSISTENT) |
145 | return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp); | 145 | return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp); |
146 | 146 | ||
147 | gfp = massage_gfp_flags(dev, gfp); | 147 | gfp = massage_gfp_flags(dev, gfp); |
@@ -176,13 +176,13 @@ static void mips_dma_free_noncoherent(struct device *dev, size_t size, | |||
176 | } | 176 | } |
177 | 177 | ||
178 | static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, | 178 | static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, |
179 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 179 | dma_addr_t dma_handle, unsigned long attrs) |
180 | { | 180 | { |
181 | unsigned long addr = (unsigned long) vaddr; | 181 | unsigned long addr = (unsigned long) vaddr; |
182 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 182 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
183 | struct page *page = NULL; | 183 | struct page *page = NULL; |
184 | 184 | ||
185 | if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { | 185 | if (attrs & DMA_ATTR_NON_CONSISTENT) { |
186 | mips_dma_free_noncoherent(dev, size, vaddr, dma_handle); | 186 | mips_dma_free_noncoherent(dev, size, vaddr, dma_handle); |
187 | return; | 187 | return; |
188 | } | 188 | } |
@@ -200,7 +200,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
200 | 200 | ||
201 | static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 201 | static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
202 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 202 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
203 | struct dma_attrs *attrs) | 203 | unsigned long attrs) |
204 | { | 204 | { |
205 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 205 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
206 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 206 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
@@ -214,7 +214,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
214 | 214 | ||
215 | pfn = page_to_pfn(virt_to_page((void *)addr)); | 215 | pfn = page_to_pfn(virt_to_page((void *)addr)); |
216 | 216 | ||
217 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) | 217 | if (attrs & DMA_ATTR_WRITE_COMBINE) |
218 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | 218 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
219 | else | 219 | else |
220 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 220 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
@@ -291,7 +291,7 @@ static inline void __dma_sync(struct page *page, | |||
291 | } | 291 | } |
292 | 292 | ||
293 | static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | 293 | static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
294 | size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) | 294 | size_t size, enum dma_data_direction direction, unsigned long attrs) |
295 | { | 295 | { |
296 | if (cpu_needs_post_dma_flush(dev)) | 296 | if (cpu_needs_post_dma_flush(dev)) |
297 | __dma_sync(dma_addr_to_page(dev, dma_addr), | 297 | __dma_sync(dma_addr_to_page(dev, dma_addr), |
@@ -301,7 +301,7 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
301 | } | 301 | } |
302 | 302 | ||
303 | static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 303 | static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
304 | int nents, enum dma_data_direction direction, struct dma_attrs *attrs) | 304 | int nents, enum dma_data_direction direction, unsigned long attrs) |
305 | { | 305 | { |
306 | int i; | 306 | int i; |
307 | struct scatterlist *sg; | 307 | struct scatterlist *sg; |
@@ -322,7 +322,7 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
322 | 322 | ||
323 | static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, | 323 | static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, |
324 | unsigned long offset, size_t size, enum dma_data_direction direction, | 324 | unsigned long offset, size_t size, enum dma_data_direction direction, |
325 | struct dma_attrs *attrs) | 325 | unsigned long attrs) |
326 | { | 326 | { |
327 | if (!plat_device_is_coherent(dev)) | 327 | if (!plat_device_is_coherent(dev)) |
328 | __dma_sync(page, offset, size, direction); | 328 | __dma_sync(page, offset, size, direction); |
@@ -332,7 +332,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, | |||
332 | 332 | ||
333 | static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 333 | static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
334 | int nhwentries, enum dma_data_direction direction, | 334 | int nhwentries, enum dma_data_direction direction, |
335 | struct dma_attrs *attrs) | 335 | unsigned long attrs) |
336 | { | 336 | { |
337 | int i; | 337 | int i; |
338 | struct scatterlist *sg; | 338 | struct scatterlist *sg; |
diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c index 3758715d4ab6..0630693bec2a 100644 --- a/arch/mips/netlogic/common/nlm-dma.c +++ b/arch/mips/netlogic/common/nlm-dma.c | |||
@@ -45,7 +45,7 @@ | |||
45 | static char *nlm_swiotlb; | 45 | static char *nlm_swiotlb; |
46 | 46 | ||
47 | static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, | 47 | static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, |
48 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 48 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
49 | { | 49 | { |
50 | /* ignore region specifiers */ | 50 | /* ignore region specifiers */ |
51 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); | 51 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); |
@@ -62,7 +62,7 @@ static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, | |||
62 | } | 62 | } |
63 | 63 | ||
64 | static void nlm_dma_free_coherent(struct device *dev, size_t size, | 64 | static void nlm_dma_free_coherent(struct device *dev, size_t size, |
65 | void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) | 65 | void *vaddr, dma_addr_t dma_handle, unsigned long attrs) |
66 | { | 66 | { |
67 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); | 67 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); |
68 | } | 68 | } |
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c index 8842394cb49a..4f4b9029f0ea 100644 --- a/arch/mn10300/mm/dma-alloc.c +++ b/arch/mn10300/mm/dma-alloc.c | |||
@@ -21,7 +21,7 @@ | |||
21 | static unsigned long pci_sram_allocated = 0xbc000000; | 21 | static unsigned long pci_sram_allocated = 0xbc000000; |
22 | 22 | ||
23 | static void *mn10300_dma_alloc(struct device *dev, size_t size, | 23 | static void *mn10300_dma_alloc(struct device *dev, size_t size, |
24 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 24 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
25 | { | 25 | { |
26 | unsigned long addr; | 26 | unsigned long addr; |
27 | void *ret; | 27 | void *ret; |
@@ -63,7 +63,7 @@ done: | |||
63 | } | 63 | } |
64 | 64 | ||
65 | static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr, | 65 | static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr, |
66 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 66 | dma_addr_t dma_handle, unsigned long attrs) |
67 | { | 67 | { |
68 | unsigned long addr = (unsigned long) vaddr & ~0x20000000; | 68 | unsigned long addr = (unsigned long) vaddr & ~0x20000000; |
69 | 69 | ||
@@ -75,7 +75,7 @@ static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr, | |||
75 | 75 | ||
76 | static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 76 | static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
77 | int nents, enum dma_data_direction direction, | 77 | int nents, enum dma_data_direction direction, |
78 | struct dma_attrs *attrs) | 78 | unsigned long attrs) |
79 | { | 79 | { |
80 | struct scatterlist *sg; | 80 | struct scatterlist *sg; |
81 | int i; | 81 | int i; |
@@ -92,7 +92,7 @@ static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
92 | 92 | ||
93 | static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page, | 93 | static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page, |
94 | unsigned long offset, size_t size, | 94 | unsigned long offset, size_t size, |
95 | enum dma_data_direction direction, struct dma_attrs *attrs) | 95 | enum dma_data_direction direction, unsigned long attrs) |
96 | { | 96 | { |
97 | return page_to_bus(page) + offset; | 97 | return page_to_bus(page) + offset; |
98 | } | 98 | } |
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c index 90422c367ed3..d800fad87896 100644 --- a/arch/nios2/mm/dma-mapping.c +++ b/arch/nios2/mm/dma-mapping.c | |||
@@ -59,7 +59,7 @@ static inline void __dma_sync_for_cpu(void *vaddr, size_t size, | |||
59 | } | 59 | } |
60 | 60 | ||
61 | static void *nios2_dma_alloc(struct device *dev, size_t size, | 61 | static void *nios2_dma_alloc(struct device *dev, size_t size, |
62 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 62 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
63 | { | 63 | { |
64 | void *ret; | 64 | void *ret; |
65 | 65 | ||
@@ -84,7 +84,7 @@ static void *nios2_dma_alloc(struct device *dev, size_t size, | |||
84 | } | 84 | } |
85 | 85 | ||
86 | static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, | 86 | static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, |
87 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 87 | dma_addr_t dma_handle, unsigned long attrs) |
88 | { | 88 | { |
89 | unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr); | 89 | unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr); |
90 | 90 | ||
@@ -93,7 +93,7 @@ static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, | |||
93 | 93 | ||
94 | static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, | 94 | static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, |
95 | int nents, enum dma_data_direction direction, | 95 | int nents, enum dma_data_direction direction, |
96 | struct dma_attrs *attrs) | 96 | unsigned long attrs) |
97 | { | 97 | { |
98 | int i; | 98 | int i; |
99 | 99 | ||
@@ -113,7 +113,7 @@ static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
113 | static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, | 113 | static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, |
114 | unsigned long offset, size_t size, | 114 | unsigned long offset, size_t size, |
115 | enum dma_data_direction direction, | 115 | enum dma_data_direction direction, |
116 | struct dma_attrs *attrs) | 116 | unsigned long attrs) |
117 | { | 117 | { |
118 | void *addr = page_address(page) + offset; | 118 | void *addr = page_address(page) + offset; |
119 | 119 | ||
@@ -123,14 +123,14 @@ static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, | |||
123 | 123 | ||
124 | static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 124 | static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
125 | size_t size, enum dma_data_direction direction, | 125 | size_t size, enum dma_data_direction direction, |
126 | struct dma_attrs *attrs) | 126 | unsigned long attrs) |
127 | { | 127 | { |
128 | __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); | 128 | __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); |
129 | } | 129 | } |
130 | 130 | ||
131 | static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | 131 | static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
132 | int nhwentries, enum dma_data_direction direction, | 132 | int nhwentries, enum dma_data_direction direction, |
133 | struct dma_attrs *attrs) | 133 | unsigned long attrs) |
134 | { | 134 | { |
135 | void *addr; | 135 | void *addr; |
136 | int i; | 136 | int i; |
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 0b77ddb1ee07..140c99140649 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/dma-mapping.h> | 22 | #include <linux/dma-mapping.h> |
23 | #include <linux/dma-debug.h> | 23 | #include <linux/dma-debug.h> |
24 | #include <linux/export.h> | 24 | #include <linux/export.h> |
25 | #include <linux/dma-attrs.h> | ||
26 | 25 | ||
27 | #include <asm/cpuinfo.h> | 26 | #include <asm/cpuinfo.h> |
28 | #include <asm/spr_defs.h> | 27 | #include <asm/spr_defs.h> |
@@ -83,7 +82,7 @@ page_clear_nocache(pte_t *pte, unsigned long addr, | |||
83 | static void * | 82 | static void * |
84 | or1k_dma_alloc(struct device *dev, size_t size, | 83 | or1k_dma_alloc(struct device *dev, size_t size, |
85 | dma_addr_t *dma_handle, gfp_t gfp, | 84 | dma_addr_t *dma_handle, gfp_t gfp, |
86 | struct dma_attrs *attrs) | 85 | unsigned long attrs) |
87 | { | 86 | { |
88 | unsigned long va; | 87 | unsigned long va; |
89 | void *page; | 88 | void *page; |
@@ -101,7 +100,7 @@ or1k_dma_alloc(struct device *dev, size_t size, | |||
101 | 100 | ||
102 | va = (unsigned long)page; | 101 | va = (unsigned long)page; |
103 | 102 | ||
104 | if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { | 103 | if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) { |
105 | /* | 104 | /* |
106 | * We need to iterate through the pages, clearing the dcache for | 105 | * We need to iterate through the pages, clearing the dcache for |
107 | * them and setting the cache-inhibit bit. | 106 | * them and setting the cache-inhibit bit. |
@@ -117,7 +116,7 @@ or1k_dma_alloc(struct device *dev, size_t size, | |||
117 | 116 | ||
118 | static void | 117 | static void |
119 | or1k_dma_free(struct device *dev, size_t size, void *vaddr, | 118 | or1k_dma_free(struct device *dev, size_t size, void *vaddr, |
120 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 119 | dma_addr_t dma_handle, unsigned long attrs) |
121 | { | 120 | { |
122 | unsigned long va = (unsigned long)vaddr; | 121 | unsigned long va = (unsigned long)vaddr; |
123 | struct mm_walk walk = { | 122 | struct mm_walk walk = { |
@@ -125,7 +124,7 @@ or1k_dma_free(struct device *dev, size_t size, void *vaddr, | |||
125 | .mm = &init_mm | 124 | .mm = &init_mm |
126 | }; | 125 | }; |
127 | 126 | ||
128 | if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { | 127 | if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) { |
129 | /* walk_page_range shouldn't be able to fail here */ | 128 | /* walk_page_range shouldn't be able to fail here */ |
130 | WARN_ON(walk_page_range(va, va + size, &walk)); | 129 | WARN_ON(walk_page_range(va, va + size, &walk)); |
131 | } | 130 | } |
@@ -137,7 +136,7 @@ static dma_addr_t | |||
137 | or1k_map_page(struct device *dev, struct page *page, | 136 | or1k_map_page(struct device *dev, struct page *page, |
138 | unsigned long offset, size_t size, | 137 | unsigned long offset, size_t size, |
139 | enum dma_data_direction dir, | 138 | enum dma_data_direction dir, |
140 | struct dma_attrs *attrs) | 139 | unsigned long attrs) |
141 | { | 140 | { |
142 | unsigned long cl; | 141 | unsigned long cl; |
143 | dma_addr_t addr = page_to_phys(page) + offset; | 142 | dma_addr_t addr = page_to_phys(page) + offset; |
@@ -170,7 +169,7 @@ or1k_map_page(struct device *dev, struct page *page, | |||
170 | static void | 169 | static void |
171 | or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, | 170 | or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, |
172 | size_t size, enum dma_data_direction dir, | 171 | size_t size, enum dma_data_direction dir, |
173 | struct dma_attrs *attrs) | 172 | unsigned long attrs) |
174 | { | 173 | { |
175 | /* Nothing special to do here... */ | 174 | /* Nothing special to do here... */ |
176 | } | 175 | } |
@@ -178,14 +177,14 @@ or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, | |||
178 | static int | 177 | static int |
179 | or1k_map_sg(struct device *dev, struct scatterlist *sg, | 178 | or1k_map_sg(struct device *dev, struct scatterlist *sg, |
180 | int nents, enum dma_data_direction dir, | 179 | int nents, enum dma_data_direction dir, |
181 | struct dma_attrs *attrs) | 180 | unsigned long attrs) |
182 | { | 181 | { |
183 | struct scatterlist *s; | 182 | struct scatterlist *s; |
184 | int i; | 183 | int i; |
185 | 184 | ||
186 | for_each_sg(sg, s, nents, i) { | 185 | for_each_sg(sg, s, nents, i) { |
187 | s->dma_address = or1k_map_page(dev, sg_page(s), s->offset, | 186 | s->dma_address = or1k_map_page(dev, sg_page(s), s->offset, |
188 | s->length, dir, NULL); | 187 | s->length, dir, 0); |
189 | } | 188 | } |
190 | 189 | ||
191 | return nents; | 190 | return nents; |
@@ -194,13 +193,13 @@ or1k_map_sg(struct device *dev, struct scatterlist *sg, | |||
194 | static void | 193 | static void |
195 | or1k_unmap_sg(struct device *dev, struct scatterlist *sg, | 194 | or1k_unmap_sg(struct device *dev, struct scatterlist *sg, |
196 | int nents, enum dma_data_direction dir, | 195 | int nents, enum dma_data_direction dir, |
197 | struct dma_attrs *attrs) | 196 | unsigned long attrs) |
198 | { | 197 | { |
199 | struct scatterlist *s; | 198 | struct scatterlist *s; |
200 | int i; | 199 | int i; |
201 | 200 | ||
202 | for_each_sg(sg, s, nents, i) { | 201 | for_each_sg(sg, s, nents, i) { |
203 | or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL); | 202 | or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0); |
204 | } | 203 | } |
205 | } | 204 | } |
206 | 205 | ||
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index a27e4928bf73..02d9ed0f3949 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c | |||
@@ -414,7 +414,7 @@ pcxl_dma_init(void) | |||
414 | __initcall(pcxl_dma_init); | 414 | __initcall(pcxl_dma_init); |
415 | 415 | ||
416 | static void *pa11_dma_alloc(struct device *dev, size_t size, | 416 | static void *pa11_dma_alloc(struct device *dev, size_t size, |
417 | dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) | 417 | dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) |
418 | { | 418 | { |
419 | unsigned long vaddr; | 419 | unsigned long vaddr; |
420 | unsigned long paddr; | 420 | unsigned long paddr; |
@@ -441,7 +441,7 @@ static void *pa11_dma_alloc(struct device *dev, size_t size, | |||
441 | } | 441 | } |
442 | 442 | ||
443 | static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, | 443 | static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, |
444 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 444 | dma_addr_t dma_handle, unsigned long attrs) |
445 | { | 445 | { |
446 | int order; | 446 | int order; |
447 | 447 | ||
@@ -454,7 +454,7 @@ static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, | |||
454 | 454 | ||
455 | static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, | 455 | static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, |
456 | unsigned long offset, size_t size, | 456 | unsigned long offset, size_t size, |
457 | enum dma_data_direction direction, struct dma_attrs *attrs) | 457 | enum dma_data_direction direction, unsigned long attrs) |
458 | { | 458 | { |
459 | void *addr = page_address(page) + offset; | 459 | void *addr = page_address(page) + offset; |
460 | BUG_ON(direction == DMA_NONE); | 460 | BUG_ON(direction == DMA_NONE); |
@@ -465,7 +465,7 @@ static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, | |||
465 | 465 | ||
466 | static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, | 466 | static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, |
467 | size_t size, enum dma_data_direction direction, | 467 | size_t size, enum dma_data_direction direction, |
468 | struct dma_attrs *attrs) | 468 | unsigned long attrs) |
469 | { | 469 | { |
470 | BUG_ON(direction == DMA_NONE); | 470 | BUG_ON(direction == DMA_NONE); |
471 | 471 | ||
@@ -484,7 +484,7 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, | |||
484 | 484 | ||
485 | static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 485 | static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
486 | int nents, enum dma_data_direction direction, | 486 | int nents, enum dma_data_direction direction, |
487 | struct dma_attrs *attrs) | 487 | unsigned long attrs) |
488 | { | 488 | { |
489 | int i; | 489 | int i; |
490 | struct scatterlist *sg; | 490 | struct scatterlist *sg; |
@@ -503,7 +503,7 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
503 | 503 | ||
504 | static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 504 | static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
505 | int nents, enum dma_data_direction direction, | 505 | int nents, enum dma_data_direction direction, |
506 | struct dma_attrs *attrs) | 506 | unsigned long attrs) |
507 | { | 507 | { |
508 | int i; | 508 | int i; |
509 | struct scatterlist *sg; | 509 | struct scatterlist *sg; |
@@ -577,11 +577,11 @@ struct dma_map_ops pcxl_dma_ops = { | |||
577 | }; | 577 | }; |
578 | 578 | ||
579 | static void *pcx_dma_alloc(struct device *dev, size_t size, | 579 | static void *pcx_dma_alloc(struct device *dev, size_t size, |
580 | dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) | 580 | dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) |
581 | { | 581 | { |
582 | void *addr; | 582 | void *addr; |
583 | 583 | ||
584 | if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) | 584 | if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) |
585 | return NULL; | 585 | return NULL; |
586 | 586 | ||
587 | addr = (void *)__get_free_pages(flag, get_order(size)); | 587 | addr = (void *)__get_free_pages(flag, get_order(size)); |
@@ -592,7 +592,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size, | |||
592 | } | 592 | } |
593 | 593 | ||
594 | static void pcx_dma_free(struct device *dev, size_t size, void *vaddr, | 594 | static void pcx_dma_free(struct device *dev, size_t size, void *vaddr, |
595 | dma_addr_t iova, struct dma_attrs *attrs) | 595 | dma_addr_t iova, unsigned long attrs) |
596 | { | 596 | { |
597 | free_pages((unsigned long)vaddr, get_order(size)); | 597 | free_pages((unsigned long)vaddr, get_order(size)); |
598 | return; | 598 | return; |
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 77816acd4fd9..84e3f8dd5e4f 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
@@ -13,7 +13,6 @@ | |||
13 | /* need struct page definitions */ | 13 | /* need struct page definitions */ |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/scatterlist.h> | 15 | #include <linux/scatterlist.h> |
16 | #include <linux/dma-attrs.h> | ||
17 | #include <linux/dma-debug.h> | 16 | #include <linux/dma-debug.h> |
18 | #include <asm/io.h> | 17 | #include <asm/io.h> |
19 | #include <asm/swiotlb.h> | 18 | #include <asm/swiotlb.h> |
@@ -25,14 +24,14 @@ | |||
25 | /* Some dma direct funcs must be visible for use in other dma_ops */ | 24 | /* Some dma direct funcs must be visible for use in other dma_ops */ |
26 | extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, | 25 | extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, |
27 | dma_addr_t *dma_handle, gfp_t flag, | 26 | dma_addr_t *dma_handle, gfp_t flag, |
28 | struct dma_attrs *attrs); | 27 | unsigned long attrs); |
29 | extern void __dma_direct_free_coherent(struct device *dev, size_t size, | 28 | extern void __dma_direct_free_coherent(struct device *dev, size_t size, |
30 | void *vaddr, dma_addr_t dma_handle, | 29 | void *vaddr, dma_addr_t dma_handle, |
31 | struct dma_attrs *attrs); | 30 | unsigned long attrs); |
32 | extern int dma_direct_mmap_coherent(struct device *dev, | 31 | extern int dma_direct_mmap_coherent(struct device *dev, |
33 | struct vm_area_struct *vma, | 32 | struct vm_area_struct *vma, |
34 | void *cpu_addr, dma_addr_t handle, | 33 | void *cpu_addr, dma_addr_t handle, |
35 | size_t size, struct dma_attrs *attrs); | 34 | size_t size, unsigned long attrs); |
36 | 35 | ||
37 | #ifdef CONFIG_NOT_COHERENT_CACHE | 36 | #ifdef CONFIG_NOT_COHERENT_CACHE |
38 | /* | 37 | /* |
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index f49a72a9062d..2c1d50792944 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h | |||
@@ -53,7 +53,7 @@ struct iommu_table_ops { | |||
53 | long index, long npages, | 53 | long index, long npages, |
54 | unsigned long uaddr, | 54 | unsigned long uaddr, |
55 | enum dma_data_direction direction, | 55 | enum dma_data_direction direction, |
56 | struct dma_attrs *attrs); | 56 | unsigned long attrs); |
57 | #ifdef CONFIG_IOMMU_API | 57 | #ifdef CONFIG_IOMMU_API |
58 | /* | 58 | /* |
59 | * Exchanges existing TCE with new TCE plus direction bits; | 59 | * Exchanges existing TCE with new TCE plus direction bits; |
@@ -248,12 +248,12 @@ extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, | |||
248 | struct scatterlist *sglist, int nelems, | 248 | struct scatterlist *sglist, int nelems, |
249 | unsigned long mask, | 249 | unsigned long mask, |
250 | enum dma_data_direction direction, | 250 | enum dma_data_direction direction, |
251 | struct dma_attrs *attrs); | 251 | unsigned long attrs); |
252 | extern void ppc_iommu_unmap_sg(struct iommu_table *tbl, | 252 | extern void ppc_iommu_unmap_sg(struct iommu_table *tbl, |
253 | struct scatterlist *sglist, | 253 | struct scatterlist *sglist, |
254 | int nelems, | 254 | int nelems, |
255 | enum dma_data_direction direction, | 255 | enum dma_data_direction direction, |
256 | struct dma_attrs *attrs); | 256 | unsigned long attrs); |
257 | 257 | ||
258 | extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, | 258 | extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, |
259 | size_t size, dma_addr_t *dma_handle, | 259 | size_t size, dma_addr_t *dma_handle, |
@@ -264,10 +264,10 @@ extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, | |||
264 | struct page *page, unsigned long offset, | 264 | struct page *page, unsigned long offset, |
265 | size_t size, unsigned long mask, | 265 | size_t size, unsigned long mask, |
266 | enum dma_data_direction direction, | 266 | enum dma_data_direction direction, |
267 | struct dma_attrs *attrs); | 267 | unsigned long attrs); |
268 | extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, | 268 | extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, |
269 | size_t size, enum dma_data_direction direction, | 269 | size_t size, enum dma_data_direction direction, |
270 | struct dma_attrs *attrs); | 270 | unsigned long attrs); |
271 | 271 | ||
272 | extern void iommu_init_early_pSeries(void); | 272 | extern void iommu_init_early_pSeries(void); |
273 | extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops); | 273 | extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops); |
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 41a7d9d49a5a..fb7cbaa37658 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, | 19 | static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, |
20 | dma_addr_t *dma_handle, gfp_t flag, | 20 | dma_addr_t *dma_handle, gfp_t flag, |
21 | struct dma_attrs *attrs) | 21 | unsigned long attrs) |
22 | { | 22 | { |
23 | return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, | 23 | return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, |
24 | dma_handle, dev->coherent_dma_mask, flag, | 24 | dma_handle, dev->coherent_dma_mask, flag, |
@@ -27,7 +27,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, | |||
27 | 27 | ||
28 | static void dma_iommu_free_coherent(struct device *dev, size_t size, | 28 | static void dma_iommu_free_coherent(struct device *dev, size_t size, |
29 | void *vaddr, dma_addr_t dma_handle, | 29 | void *vaddr, dma_addr_t dma_handle, |
30 | struct dma_attrs *attrs) | 30 | unsigned long attrs) |
31 | { | 31 | { |
32 | iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); | 32 | iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); |
33 | } | 33 | } |
@@ -40,7 +40,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size, | |||
40 | static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, | 40 | static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, |
41 | unsigned long offset, size_t size, | 41 | unsigned long offset, size_t size, |
42 | enum dma_data_direction direction, | 42 | enum dma_data_direction direction, |
43 | struct dma_attrs *attrs) | 43 | unsigned long attrs) |
44 | { | 44 | { |
45 | return iommu_map_page(dev, get_iommu_table_base(dev), page, offset, | 45 | return iommu_map_page(dev, get_iommu_table_base(dev), page, offset, |
46 | size, device_to_mask(dev), direction, attrs); | 46 | size, device_to_mask(dev), direction, attrs); |
@@ -49,7 +49,7 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, | |||
49 | 49 | ||
50 | static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, | 50 | static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, |
51 | size_t size, enum dma_data_direction direction, | 51 | size_t size, enum dma_data_direction direction, |
52 | struct dma_attrs *attrs) | 52 | unsigned long attrs) |
53 | { | 53 | { |
54 | iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction, | 54 | iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction, |
55 | attrs); | 55 | attrs); |
@@ -58,7 +58,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, | |||
58 | 58 | ||
59 | static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, | 59 | static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, |
60 | int nelems, enum dma_data_direction direction, | 60 | int nelems, enum dma_data_direction direction, |
61 | struct dma_attrs *attrs) | 61 | unsigned long attrs) |
62 | { | 62 | { |
63 | return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems, | 63 | return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems, |
64 | device_to_mask(dev), direction, attrs); | 64 | device_to_mask(dev), direction, attrs); |
@@ -66,7 +66,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, | |||
66 | 66 | ||
67 | static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, | 67 | static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, |
68 | int nelems, enum dma_data_direction direction, | 68 | int nelems, enum dma_data_direction direction, |
69 | struct dma_attrs *attrs) | 69 | unsigned long attrs) |
70 | { | 70 | { |
71 | ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, | 71 | ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, |
72 | direction, attrs); | 72 | direction, attrs); |
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 3f1472a78f39..e64a6016fba7 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c | |||
@@ -64,7 +64,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask) | |||
64 | 64 | ||
65 | void *__dma_direct_alloc_coherent(struct device *dev, size_t size, | 65 | void *__dma_direct_alloc_coherent(struct device *dev, size_t size, |
66 | dma_addr_t *dma_handle, gfp_t flag, | 66 | dma_addr_t *dma_handle, gfp_t flag, |
67 | struct dma_attrs *attrs) | 67 | unsigned long attrs) |
68 | { | 68 | { |
69 | void *ret; | 69 | void *ret; |
70 | #ifdef CONFIG_NOT_COHERENT_CACHE | 70 | #ifdef CONFIG_NOT_COHERENT_CACHE |
@@ -121,7 +121,7 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size, | |||
121 | 121 | ||
122 | void __dma_direct_free_coherent(struct device *dev, size_t size, | 122 | void __dma_direct_free_coherent(struct device *dev, size_t size, |
123 | void *vaddr, dma_addr_t dma_handle, | 123 | void *vaddr, dma_addr_t dma_handle, |
124 | struct dma_attrs *attrs) | 124 | unsigned long attrs) |
125 | { | 125 | { |
126 | #ifdef CONFIG_NOT_COHERENT_CACHE | 126 | #ifdef CONFIG_NOT_COHERENT_CACHE |
127 | __dma_free_coherent(size, vaddr); | 127 | __dma_free_coherent(size, vaddr); |
@@ -132,7 +132,7 @@ void __dma_direct_free_coherent(struct device *dev, size_t size, | |||
132 | 132 | ||
133 | static void *dma_direct_alloc_coherent(struct device *dev, size_t size, | 133 | static void *dma_direct_alloc_coherent(struct device *dev, size_t size, |
134 | dma_addr_t *dma_handle, gfp_t flag, | 134 | dma_addr_t *dma_handle, gfp_t flag, |
135 | struct dma_attrs *attrs) | 135 | unsigned long attrs) |
136 | { | 136 | { |
137 | struct iommu_table *iommu; | 137 | struct iommu_table *iommu; |
138 | 138 | ||
@@ -156,7 +156,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size, | |||
156 | 156 | ||
157 | static void dma_direct_free_coherent(struct device *dev, size_t size, | 157 | static void dma_direct_free_coherent(struct device *dev, size_t size, |
158 | void *vaddr, dma_addr_t dma_handle, | 158 | void *vaddr, dma_addr_t dma_handle, |
159 | struct dma_attrs *attrs) | 159 | unsigned long attrs) |
160 | { | 160 | { |
161 | struct iommu_table *iommu; | 161 | struct iommu_table *iommu; |
162 | 162 | ||
@@ -177,7 +177,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size, | |||
177 | 177 | ||
178 | int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | 178 | int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, |
179 | void *cpu_addr, dma_addr_t handle, size_t size, | 179 | void *cpu_addr, dma_addr_t handle, size_t size, |
180 | struct dma_attrs *attrs) | 180 | unsigned long attrs) |
181 | { | 181 | { |
182 | unsigned long pfn; | 182 | unsigned long pfn; |
183 | 183 | ||
@@ -195,7 +195,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | |||
195 | 195 | ||
196 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | 196 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, |
197 | int nents, enum dma_data_direction direction, | 197 | int nents, enum dma_data_direction direction, |
198 | struct dma_attrs *attrs) | 198 | unsigned long attrs) |
199 | { | 199 | { |
200 | struct scatterlist *sg; | 200 | struct scatterlist *sg; |
201 | int i; | 201 | int i; |
@@ -211,7 +211,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | |||
211 | 211 | ||
212 | static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, | 212 | static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, |
213 | int nents, enum dma_data_direction direction, | 213 | int nents, enum dma_data_direction direction, |
214 | struct dma_attrs *attrs) | 214 | unsigned long attrs) |
215 | { | 215 | { |
216 | } | 216 | } |
217 | 217 | ||
@@ -232,7 +232,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, | |||
232 | unsigned long offset, | 232 | unsigned long offset, |
233 | size_t size, | 233 | size_t size, |
234 | enum dma_data_direction dir, | 234 | enum dma_data_direction dir, |
235 | struct dma_attrs *attrs) | 235 | unsigned long attrs) |
236 | { | 236 | { |
237 | BUG_ON(dir == DMA_NONE); | 237 | BUG_ON(dir == DMA_NONE); |
238 | __dma_sync_page(page, offset, size, dir); | 238 | __dma_sync_page(page, offset, size, dir); |
@@ -243,7 +243,7 @@ static inline void dma_direct_unmap_page(struct device *dev, | |||
243 | dma_addr_t dma_address, | 243 | dma_addr_t dma_address, |
244 | size_t size, | 244 | size_t size, |
245 | enum dma_data_direction direction, | 245 | enum dma_data_direction direction, |
246 | struct dma_attrs *attrs) | 246 | unsigned long attrs) |
247 | { | 247 | { |
248 | } | 248 | } |
249 | 249 | ||
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index a89f4f7a66bd..c1ca9282f4a0 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c | |||
@@ -65,7 +65,7 @@ static void *ibmebus_alloc_coherent(struct device *dev, | |||
65 | size_t size, | 65 | size_t size, |
66 | dma_addr_t *dma_handle, | 66 | dma_addr_t *dma_handle, |
67 | gfp_t flag, | 67 | gfp_t flag, |
68 | struct dma_attrs *attrs) | 68 | unsigned long attrs) |
69 | { | 69 | { |
70 | void *mem; | 70 | void *mem; |
71 | 71 | ||
@@ -78,7 +78,7 @@ static void *ibmebus_alloc_coherent(struct device *dev, | |||
78 | static void ibmebus_free_coherent(struct device *dev, | 78 | static void ibmebus_free_coherent(struct device *dev, |
79 | size_t size, void *vaddr, | 79 | size_t size, void *vaddr, |
80 | dma_addr_t dma_handle, | 80 | dma_addr_t dma_handle, |
81 | struct dma_attrs *attrs) | 81 | unsigned long attrs) |
82 | { | 82 | { |
83 | kfree(vaddr); | 83 | kfree(vaddr); |
84 | } | 84 | } |
@@ -88,7 +88,7 @@ static dma_addr_t ibmebus_map_page(struct device *dev, | |||
88 | unsigned long offset, | 88 | unsigned long offset, |
89 | size_t size, | 89 | size_t size, |
90 | enum dma_data_direction direction, | 90 | enum dma_data_direction direction, |
91 | struct dma_attrs *attrs) | 91 | unsigned long attrs) |
92 | { | 92 | { |
93 | return (dma_addr_t)(page_address(page) + offset); | 93 | return (dma_addr_t)(page_address(page) + offset); |
94 | } | 94 | } |
@@ -97,7 +97,7 @@ static void ibmebus_unmap_page(struct device *dev, | |||
97 | dma_addr_t dma_addr, | 97 | dma_addr_t dma_addr, |
98 | size_t size, | 98 | size_t size, |
99 | enum dma_data_direction direction, | 99 | enum dma_data_direction direction, |
100 | struct dma_attrs *attrs) | 100 | unsigned long attrs) |
101 | { | 101 | { |
102 | return; | 102 | return; |
103 | } | 103 | } |
@@ -105,7 +105,7 @@ static void ibmebus_unmap_page(struct device *dev, | |||
105 | static int ibmebus_map_sg(struct device *dev, | 105 | static int ibmebus_map_sg(struct device *dev, |
106 | struct scatterlist *sgl, | 106 | struct scatterlist *sgl, |
107 | int nents, enum dma_data_direction direction, | 107 | int nents, enum dma_data_direction direction, |
108 | struct dma_attrs *attrs) | 108 | unsigned long attrs) |
109 | { | 109 | { |
110 | struct scatterlist *sg; | 110 | struct scatterlist *sg; |
111 | int i; | 111 | int i; |
@@ -121,7 +121,7 @@ static int ibmebus_map_sg(struct device *dev, | |||
121 | static void ibmebus_unmap_sg(struct device *dev, | 121 | static void ibmebus_unmap_sg(struct device *dev, |
122 | struct scatterlist *sg, | 122 | struct scatterlist *sg, |
123 | int nents, enum dma_data_direction direction, | 123 | int nents, enum dma_data_direction direction, |
124 | struct dma_attrs *attrs) | 124 | unsigned long attrs) |
125 | { | 125 | { |
126 | return; | 126 | return; |
127 | } | 127 | } |
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index a8e3490b54e3..37d6e741be82 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -307,7 +307,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, | |||
307 | void *page, unsigned int npages, | 307 | void *page, unsigned int npages, |
308 | enum dma_data_direction direction, | 308 | enum dma_data_direction direction, |
309 | unsigned long mask, unsigned int align_order, | 309 | unsigned long mask, unsigned int align_order, |
310 | struct dma_attrs *attrs) | 310 | unsigned long attrs) |
311 | { | 311 | { |
312 | unsigned long entry; | 312 | unsigned long entry; |
313 | dma_addr_t ret = DMA_ERROR_CODE; | 313 | dma_addr_t ret = DMA_ERROR_CODE; |
@@ -431,7 +431,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, | |||
431 | int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, | 431 | int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, |
432 | struct scatterlist *sglist, int nelems, | 432 | struct scatterlist *sglist, int nelems, |
433 | unsigned long mask, enum dma_data_direction direction, | 433 | unsigned long mask, enum dma_data_direction direction, |
434 | struct dma_attrs *attrs) | 434 | unsigned long attrs) |
435 | { | 435 | { |
436 | dma_addr_t dma_next = 0, dma_addr; | 436 | dma_addr_t dma_next = 0, dma_addr; |
437 | struct scatterlist *s, *outs, *segstart; | 437 | struct scatterlist *s, *outs, *segstart; |
@@ -574,7 +574,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, | |||
574 | 574 | ||
575 | void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, | 575 | void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, |
576 | int nelems, enum dma_data_direction direction, | 576 | int nelems, enum dma_data_direction direction, |
577 | struct dma_attrs *attrs) | 577 | unsigned long attrs) |
578 | { | 578 | { |
579 | struct scatterlist *sg; | 579 | struct scatterlist *sg; |
580 | 580 | ||
@@ -753,7 +753,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name) | |||
753 | dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, | 753 | dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, |
754 | struct page *page, unsigned long offset, size_t size, | 754 | struct page *page, unsigned long offset, size_t size, |
755 | unsigned long mask, enum dma_data_direction direction, | 755 | unsigned long mask, enum dma_data_direction direction, |
756 | struct dma_attrs *attrs) | 756 | unsigned long attrs) |
757 | { | 757 | { |
758 | dma_addr_t dma_handle = DMA_ERROR_CODE; | 758 | dma_addr_t dma_handle = DMA_ERROR_CODE; |
759 | void *vaddr; | 759 | void *vaddr; |
@@ -790,7 +790,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, | |||
790 | 790 | ||
791 | void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, | 791 | void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, |
792 | size_t size, enum dma_data_direction direction, | 792 | size_t size, enum dma_data_direction direction, |
793 | struct dma_attrs *attrs) | 793 | unsigned long attrs) |
794 | { | 794 | { |
795 | unsigned int npages; | 795 | unsigned int npages; |
796 | 796 | ||
@@ -845,7 +845,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, | |||
845 | nio_pages = size >> tbl->it_page_shift; | 845 | nio_pages = size >> tbl->it_page_shift; |
846 | io_order = get_iommu_order(size, tbl); | 846 | io_order = get_iommu_order(size, tbl); |
847 | mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, | 847 | mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, |
848 | mask >> tbl->it_page_shift, io_order, NULL); | 848 | mask >> tbl->it_page_shift, io_order, 0); |
849 | if (mapping == DMA_ERROR_CODE) { | 849 | if (mapping == DMA_ERROR_CODE) { |
850 | free_pages((unsigned long)ret, order); | 850 | free_pages((unsigned long)ret, order); |
851 | return NULL; | 851 | return NULL; |
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 8d7358f3a273..b3813ddb2fb4 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
@@ -482,7 +482,7 @@ static void vio_cmo_balance(struct work_struct *work) | |||
482 | 482 | ||
483 | static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, | 483 | static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, |
484 | dma_addr_t *dma_handle, gfp_t flag, | 484 | dma_addr_t *dma_handle, gfp_t flag, |
485 | struct dma_attrs *attrs) | 485 | unsigned long attrs) |
486 | { | 486 | { |
487 | struct vio_dev *viodev = to_vio_dev(dev); | 487 | struct vio_dev *viodev = to_vio_dev(dev); |
488 | void *ret; | 488 | void *ret; |
@@ -503,7 +503,7 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, | |||
503 | 503 | ||
504 | static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, | 504 | static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, |
505 | void *vaddr, dma_addr_t dma_handle, | 505 | void *vaddr, dma_addr_t dma_handle, |
506 | struct dma_attrs *attrs) | 506 | unsigned long attrs) |
507 | { | 507 | { |
508 | struct vio_dev *viodev = to_vio_dev(dev); | 508 | struct vio_dev *viodev = to_vio_dev(dev); |
509 | 509 | ||
@@ -515,7 +515,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, | |||
515 | static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, | 515 | static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, |
516 | unsigned long offset, size_t size, | 516 | unsigned long offset, size_t size, |
517 | enum dma_data_direction direction, | 517 | enum dma_data_direction direction, |
518 | struct dma_attrs *attrs) | 518 | unsigned long attrs) |
519 | { | 519 | { |
520 | struct vio_dev *viodev = to_vio_dev(dev); | 520 | struct vio_dev *viodev = to_vio_dev(dev); |
521 | struct iommu_table *tbl; | 521 | struct iommu_table *tbl; |
@@ -539,7 +539,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, | |||
539 | static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, | 539 | static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, |
540 | size_t size, | 540 | size_t size, |
541 | enum dma_data_direction direction, | 541 | enum dma_data_direction direction, |
542 | struct dma_attrs *attrs) | 542 | unsigned long attrs) |
543 | { | 543 | { |
544 | struct vio_dev *viodev = to_vio_dev(dev); | 544 | struct vio_dev *viodev = to_vio_dev(dev); |
545 | struct iommu_table *tbl; | 545 | struct iommu_table *tbl; |
@@ -552,7 +552,7 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, | |||
552 | 552 | ||
553 | static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, | 553 | static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, |
554 | int nelems, enum dma_data_direction direction, | 554 | int nelems, enum dma_data_direction direction, |
555 | struct dma_attrs *attrs) | 555 | unsigned long attrs) |
556 | { | 556 | { |
557 | struct vio_dev *viodev = to_vio_dev(dev); | 557 | struct vio_dev *viodev = to_vio_dev(dev); |
558 | struct iommu_table *tbl; | 558 | struct iommu_table *tbl; |
@@ -588,7 +588,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, | |||
588 | static void vio_dma_iommu_unmap_sg(struct device *dev, | 588 | static void vio_dma_iommu_unmap_sg(struct device *dev, |
589 | struct scatterlist *sglist, int nelems, | 589 | struct scatterlist *sglist, int nelems, |
590 | enum dma_data_direction direction, | 590 | enum dma_data_direction direction, |
591 | struct dma_attrs *attrs) | 591 | unsigned long attrs) |
592 | { | 592 | { |
593 | struct vio_dev *viodev = to_vio_dev(dev); | 593 | struct vio_dev *viodev = to_vio_dev(dev); |
594 | struct iommu_table *tbl; | 594 | struct iommu_table *tbl; |
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 9027d7c48507..f7d1a4953ea0 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c | |||
@@ -166,7 +166,7 @@ static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, | |||
166 | 166 | ||
167 | static int tce_build_cell(struct iommu_table *tbl, long index, long npages, | 167 | static int tce_build_cell(struct iommu_table *tbl, long index, long npages, |
168 | unsigned long uaddr, enum dma_data_direction direction, | 168 | unsigned long uaddr, enum dma_data_direction direction, |
169 | struct dma_attrs *attrs) | 169 | unsigned long attrs) |
170 | { | 170 | { |
171 | int i; | 171 | int i; |
172 | unsigned long *io_pte, base_pte; | 172 | unsigned long *io_pte, base_pte; |
@@ -193,7 +193,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages, | |||
193 | base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | | 193 | base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | |
194 | CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); | 194 | CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); |
195 | #endif | 195 | #endif |
196 | if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))) | 196 | if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING)) |
197 | base_pte &= ~CBE_IOPTE_SO_RW; | 197 | base_pte &= ~CBE_IOPTE_SO_RW; |
198 | 198 | ||
199 | io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); | 199 | io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); |
@@ -526,7 +526,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, | |||
526 | 526 | ||
527 | __set_bit(0, window->table.it_map); | 527 | __set_bit(0, window->table.it_map); |
528 | tce_build_cell(&window->table, window->table.it_offset, 1, | 528 | tce_build_cell(&window->table, window->table.it_offset, 1, |
529 | (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL); | 529 | (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0); |
530 | 530 | ||
531 | return window; | 531 | return window; |
532 | } | 532 | } |
@@ -572,7 +572,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev) | |||
572 | 572 | ||
573 | static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, | 573 | static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, |
574 | dma_addr_t *dma_handle, gfp_t flag, | 574 | dma_addr_t *dma_handle, gfp_t flag, |
575 | struct dma_attrs *attrs) | 575 | unsigned long attrs) |
576 | { | 576 | { |
577 | if (iommu_fixed_is_weak) | 577 | if (iommu_fixed_is_weak) |
578 | return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), | 578 | return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), |
@@ -586,7 +586,7 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, | |||
586 | 586 | ||
587 | static void dma_fixed_free_coherent(struct device *dev, size_t size, | 587 | static void dma_fixed_free_coherent(struct device *dev, size_t size, |
588 | void *vaddr, dma_addr_t dma_handle, | 588 | void *vaddr, dma_addr_t dma_handle, |
589 | struct dma_attrs *attrs) | 589 | unsigned long attrs) |
590 | { | 590 | { |
591 | if (iommu_fixed_is_weak) | 591 | if (iommu_fixed_is_weak) |
592 | iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, | 592 | iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, |
@@ -598,9 +598,9 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size, | |||
598 | static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, | 598 | static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, |
599 | unsigned long offset, size_t size, | 599 | unsigned long offset, size_t size, |
600 | enum dma_data_direction direction, | 600 | enum dma_data_direction direction, |
601 | struct dma_attrs *attrs) | 601 | unsigned long attrs) |
602 | { | 602 | { |
603 | if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) | 603 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) |
604 | return dma_direct_ops.map_page(dev, page, offset, size, | 604 | return dma_direct_ops.map_page(dev, page, offset, size, |
605 | direction, attrs); | 605 | direction, attrs); |
606 | else | 606 | else |
@@ -611,9 +611,9 @@ static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, | |||
611 | 611 | ||
612 | static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, | 612 | static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, |
613 | size_t size, enum dma_data_direction direction, | 613 | size_t size, enum dma_data_direction direction, |
614 | struct dma_attrs *attrs) | 614 | unsigned long attrs) |
615 | { | 615 | { |
616 | if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) | 616 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) |
617 | dma_direct_ops.unmap_page(dev, dma_addr, size, direction, | 617 | dma_direct_ops.unmap_page(dev, dma_addr, size, direction, |
618 | attrs); | 618 | attrs); |
619 | else | 619 | else |
@@ -623,9 +623,9 @@ static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
623 | 623 | ||
624 | static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, | 624 | static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, |
625 | int nents, enum dma_data_direction direction, | 625 | int nents, enum dma_data_direction direction, |
626 | struct dma_attrs *attrs) | 626 | unsigned long attrs) |
627 | { | 627 | { |
628 | if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) | 628 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) |
629 | return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs); | 629 | return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs); |
630 | else | 630 | else |
631 | return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg, | 631 | return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg, |
@@ -635,9 +635,9 @@ static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, | |||
635 | 635 | ||
636 | static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, | 636 | static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, |
637 | int nents, enum dma_data_direction direction, | 637 | int nents, enum dma_data_direction direction, |
638 | struct dma_attrs *attrs) | 638 | unsigned long attrs) |
639 | { | 639 | { |
640 | if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) | 640 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) |
641 | dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs); | 641 | dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs); |
642 | else | 642 | else |
643 | ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, | 643 | ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, |
@@ -1162,7 +1162,7 @@ static int __init setup_iommu_fixed(char *str) | |||
1162 | pciep = of_find_node_by_type(NULL, "pcie-endpoint"); | 1162 | pciep = of_find_node_by_type(NULL, "pcie-endpoint"); |
1163 | 1163 | ||
1164 | if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) | 1164 | if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) |
1165 | iommu_fixed_is_weak = 1; | 1165 | iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING; |
1166 | 1166 | ||
1167 | of_node_put(pciep); | 1167 | of_node_put(pciep); |
1168 | 1168 | ||
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 43dd3fb514e0..309d9ccccd50 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c | |||
@@ -88,7 +88,7 @@ static int iommu_table_iobmap_inited; | |||
88 | static int iobmap_build(struct iommu_table *tbl, long index, | 88 | static int iobmap_build(struct iommu_table *tbl, long index, |
89 | long npages, unsigned long uaddr, | 89 | long npages, unsigned long uaddr, |
90 | enum dma_data_direction direction, | 90 | enum dma_data_direction direction, |
91 | struct dma_attrs *attrs) | 91 | unsigned long attrs) |
92 | { | 92 | { |
93 | u32 *ip; | 93 | u32 *ip; |
94 | u32 rpn; | 94 | u32 rpn; |
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 4383a5ff82ba..00e1a0195c78 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c | |||
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(pnv_pci_get_npu_dev); | |||
73 | 73 | ||
74 | static void *dma_npu_alloc(struct device *dev, size_t size, | 74 | static void *dma_npu_alloc(struct device *dev, size_t size, |
75 | dma_addr_t *dma_handle, gfp_t flag, | 75 | dma_addr_t *dma_handle, gfp_t flag, |
76 | struct dma_attrs *attrs) | 76 | unsigned long attrs) |
77 | { | 77 | { |
78 | NPU_DMA_OP_UNSUPPORTED(); | 78 | NPU_DMA_OP_UNSUPPORTED(); |
79 | return NULL; | 79 | return NULL; |
@@ -81,7 +81,7 @@ static void *dma_npu_alloc(struct device *dev, size_t size, | |||
81 | 81 | ||
82 | static void dma_npu_free(struct device *dev, size_t size, | 82 | static void dma_npu_free(struct device *dev, size_t size, |
83 | void *vaddr, dma_addr_t dma_handle, | 83 | void *vaddr, dma_addr_t dma_handle, |
84 | struct dma_attrs *attrs) | 84 | unsigned long attrs) |
85 | { | 85 | { |
86 | NPU_DMA_OP_UNSUPPORTED(); | 86 | NPU_DMA_OP_UNSUPPORTED(); |
87 | } | 87 | } |
@@ -89,7 +89,7 @@ static void dma_npu_free(struct device *dev, size_t size, | |||
89 | static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, | 89 | static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, |
90 | unsigned long offset, size_t size, | 90 | unsigned long offset, size_t size, |
91 | enum dma_data_direction direction, | 91 | enum dma_data_direction direction, |
92 | struct dma_attrs *attrs) | 92 | unsigned long attrs) |
93 | { | 93 | { |
94 | NPU_DMA_OP_UNSUPPORTED(); | 94 | NPU_DMA_OP_UNSUPPORTED(); |
95 | return 0; | 95 | return 0; |
@@ -97,7 +97,7 @@ static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, | |||
97 | 97 | ||
98 | static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist, | 98 | static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist, |
99 | int nelems, enum dma_data_direction direction, | 99 | int nelems, enum dma_data_direction direction, |
100 | struct dma_attrs *attrs) | 100 | unsigned long attrs) |
101 | { | 101 | { |
102 | NPU_DMA_OP_UNSUPPORTED(); | 102 | NPU_DMA_OP_UNSUPPORTED(); |
103 | return 0; | 103 | return 0; |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 891fc4a453df..6b9528307f62 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -1806,7 +1806,7 @@ static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, | |||
1806 | static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, | 1806 | static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, |
1807 | long npages, unsigned long uaddr, | 1807 | long npages, unsigned long uaddr, |
1808 | enum dma_data_direction direction, | 1808 | enum dma_data_direction direction, |
1809 | struct dma_attrs *attrs) | 1809 | unsigned long attrs) |
1810 | { | 1810 | { |
1811 | int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, | 1811 | int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, |
1812 | attrs); | 1812 | attrs); |
@@ -1950,7 +1950,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, | |||
1950 | static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, | 1950 | static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, |
1951 | long npages, unsigned long uaddr, | 1951 | long npages, unsigned long uaddr, |
1952 | enum dma_data_direction direction, | 1952 | enum dma_data_direction direction, |
1953 | struct dma_attrs *attrs) | 1953 | unsigned long attrs) |
1954 | { | 1954 | { |
1955 | int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, | 1955 | int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, |
1956 | attrs); | 1956 | attrs); |
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 6701dd5ded20..a21d831c1114 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -704,7 +704,7 @@ static __be64 *pnv_tce(struct iommu_table *tbl, long idx) | |||
704 | 704 | ||
705 | int pnv_tce_build(struct iommu_table *tbl, long index, long npages, | 705 | int pnv_tce_build(struct iommu_table *tbl, long index, long npages, |
706 | unsigned long uaddr, enum dma_data_direction direction, | 706 | unsigned long uaddr, enum dma_data_direction direction, |
707 | struct dma_attrs *attrs) | 707 | unsigned long attrs) |
708 | { | 708 | { |
709 | u64 proto_tce = iommu_direction_to_tce_perm(direction); | 709 | u64 proto_tce = iommu_direction_to_tce_perm(direction); |
710 | u64 rpn = __pa(uaddr) >> tbl->it_page_shift; | 710 | u64 rpn = __pa(uaddr) >> tbl->it_page_shift; |
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index d088d4f06116..e64df7894d6e 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -181,7 +181,7 @@ struct pnv_phb { | |||
181 | extern struct pci_ops pnv_pci_ops; | 181 | extern struct pci_ops pnv_pci_ops; |
182 | extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, | 182 | extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, |
183 | unsigned long uaddr, enum dma_data_direction direction, | 183 | unsigned long uaddr, enum dma_data_direction direction, |
184 | struct dma_attrs *attrs); | 184 | unsigned long attrs); |
185 | extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages); | 185 | extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages); |
186 | extern int pnv_tce_xchg(struct iommu_table *tbl, long index, | 186 | extern int pnv_tce_xchg(struct iommu_table *tbl, long index, |
187 | unsigned long *hpa, enum dma_data_direction *direction); | 187 | unsigned long *hpa, enum dma_data_direction *direction); |
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 5606fe36faf2..8af1c15aef85 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c | |||
@@ -516,7 +516,7 @@ core_initcall(ps3_system_bus_init); | |||
516 | */ | 516 | */ |
517 | static void * ps3_alloc_coherent(struct device *_dev, size_t size, | 517 | static void * ps3_alloc_coherent(struct device *_dev, size_t size, |
518 | dma_addr_t *dma_handle, gfp_t flag, | 518 | dma_addr_t *dma_handle, gfp_t flag, |
519 | struct dma_attrs *attrs) | 519 | unsigned long attrs) |
520 | { | 520 | { |
521 | int result; | 521 | int result; |
522 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | 522 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); |
@@ -553,7 +553,7 @@ clean_none: | |||
553 | } | 553 | } |
554 | 554 | ||
555 | static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, | 555 | static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, |
556 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 556 | dma_addr_t dma_handle, unsigned long attrs) |
557 | { | 557 | { |
558 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | 558 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); |
559 | 559 | ||
@@ -569,7 +569,7 @@ static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, | |||
569 | 569 | ||
570 | static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, | 570 | static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, |
571 | unsigned long offset, size_t size, enum dma_data_direction direction, | 571 | unsigned long offset, size_t size, enum dma_data_direction direction, |
572 | struct dma_attrs *attrs) | 572 | unsigned long attrs) |
573 | { | 573 | { |
574 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | 574 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); |
575 | int result; | 575 | int result; |
@@ -592,7 +592,7 @@ static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, | |||
592 | static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, | 592 | static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, |
593 | unsigned long offset, size_t size, | 593 | unsigned long offset, size_t size, |
594 | enum dma_data_direction direction, | 594 | enum dma_data_direction direction, |
595 | struct dma_attrs *attrs) | 595 | unsigned long attrs) |
596 | { | 596 | { |
597 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | 597 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); |
598 | int result; | 598 | int result; |
@@ -626,7 +626,7 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, | |||
626 | } | 626 | } |
627 | 627 | ||
628 | static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, | 628 | static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, |
629 | size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) | 629 | size_t size, enum dma_data_direction direction, unsigned long attrs) |
630 | { | 630 | { |
631 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | 631 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); |
632 | int result; | 632 | int result; |
@@ -640,7 +640,7 @@ static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, | |||
640 | } | 640 | } |
641 | 641 | ||
642 | static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, | 642 | static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, |
643 | int nents, enum dma_data_direction direction, struct dma_attrs *attrs) | 643 | int nents, enum dma_data_direction direction, unsigned long attrs) |
644 | { | 644 | { |
645 | #if defined(CONFIG_PS3_DYNAMIC_DMA) | 645 | #if defined(CONFIG_PS3_DYNAMIC_DMA) |
646 | BUG_ON("do"); | 646 | BUG_ON("do"); |
@@ -670,14 +670,14 @@ static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, | |||
670 | static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg, | 670 | static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg, |
671 | int nents, | 671 | int nents, |
672 | enum dma_data_direction direction, | 672 | enum dma_data_direction direction, |
673 | struct dma_attrs *attrs) | 673 | unsigned long attrs) |
674 | { | 674 | { |
675 | BUG(); | 675 | BUG(); |
676 | return 0; | 676 | return 0; |
677 | } | 677 | } |
678 | 678 | ||
679 | static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg, | 679 | static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg, |
680 | int nents, enum dma_data_direction direction, struct dma_attrs *attrs) | 680 | int nents, enum dma_data_direction direction, unsigned long attrs) |
681 | { | 681 | { |
682 | #if defined(CONFIG_PS3_DYNAMIC_DMA) | 682 | #if defined(CONFIG_PS3_DYNAMIC_DMA) |
683 | BUG_ON("do"); | 683 | BUG_ON("do"); |
@@ -686,7 +686,7 @@ static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg, | |||
686 | 686 | ||
687 | static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg, | 687 | static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg, |
688 | int nents, enum dma_data_direction direction, | 688 | int nents, enum dma_data_direction direction, |
689 | struct dma_attrs *attrs) | 689 | unsigned long attrs) |
690 | { | 690 | { |
691 | BUG(); | 691 | BUG(); |
692 | } | 692 | } |
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 770a753b52c9..0024e451bb36 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
@@ -123,7 +123,7 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group, | |||
123 | static int tce_build_pSeries(struct iommu_table *tbl, long index, | 123 | static int tce_build_pSeries(struct iommu_table *tbl, long index, |
124 | long npages, unsigned long uaddr, | 124 | long npages, unsigned long uaddr, |
125 | enum dma_data_direction direction, | 125 | enum dma_data_direction direction, |
126 | struct dma_attrs *attrs) | 126 | unsigned long attrs) |
127 | { | 127 | { |
128 | u64 proto_tce; | 128 | u64 proto_tce; |
129 | __be64 *tcep, *tces; | 129 | __be64 *tcep, *tces; |
@@ -173,7 +173,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); | |||
173 | static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, | 173 | static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, |
174 | long npages, unsigned long uaddr, | 174 | long npages, unsigned long uaddr, |
175 | enum dma_data_direction direction, | 175 | enum dma_data_direction direction, |
176 | struct dma_attrs *attrs) | 176 | unsigned long attrs) |
177 | { | 177 | { |
178 | u64 rc = 0; | 178 | u64 rc = 0; |
179 | u64 proto_tce, tce; | 179 | u64 proto_tce, tce; |
@@ -216,7 +216,7 @@ static DEFINE_PER_CPU(__be64 *, tce_page); | |||
216 | static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | 216 | static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, |
217 | long npages, unsigned long uaddr, | 217 | long npages, unsigned long uaddr, |
218 | enum dma_data_direction direction, | 218 | enum dma_data_direction direction, |
219 | struct dma_attrs *attrs) | 219 | unsigned long attrs) |
220 | { | 220 | { |
221 | u64 rc = 0; | 221 | u64 rc = 0; |
222 | u64 proto_tce; | 222 | u64 proto_tce; |
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 26904f4879ec..3573d54b2770 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c | |||
@@ -185,7 +185,7 @@ static void dart_flush(struct iommu_table *tbl) | |||
185 | static int dart_build(struct iommu_table *tbl, long index, | 185 | static int dart_build(struct iommu_table *tbl, long index, |
186 | long npages, unsigned long uaddr, | 186 | long npages, unsigned long uaddr, |
187 | enum dma_data_direction direction, | 187 | enum dma_data_direction direction, |
188 | struct dma_attrs *attrs) | 188 | unsigned long attrs) |
189 | { | 189 | { |
190 | unsigned int *dp, *orig_dp; | 190 | unsigned int *dp, *orig_dp; |
191 | unsigned int rpn; | 191 | unsigned int rpn; |
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 3249b7464889..ffaba07f50ab 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/scatterlist.h> | 7 | #include <linux/scatterlist.h> |
8 | #include <linux/dma-attrs.h> | ||
9 | #include <linux/dma-debug.h> | 8 | #include <linux/dma-debug.h> |
10 | #include <linux/io.h> | 9 | #include <linux/io.h> |
11 | 10 | ||
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 070f1ae5cfad..7297fce9bf80 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c | |||
@@ -286,7 +286,7 @@ static inline void zpci_err_dma(unsigned long rc, unsigned long addr) | |||
286 | static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, | 286 | static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, |
287 | unsigned long offset, size_t size, | 287 | unsigned long offset, size_t size, |
288 | enum dma_data_direction direction, | 288 | enum dma_data_direction direction, |
289 | struct dma_attrs *attrs) | 289 | unsigned long attrs) |
290 | { | 290 | { |
291 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); | 291 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
292 | unsigned long nr_pages, iommu_page_index; | 292 | unsigned long nr_pages, iommu_page_index; |
@@ -332,7 +332,7 @@ out_err: | |||
332 | 332 | ||
333 | static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, | 333 | static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, |
334 | size_t size, enum dma_data_direction direction, | 334 | size_t size, enum dma_data_direction direction, |
335 | struct dma_attrs *attrs) | 335 | unsigned long attrs) |
336 | { | 336 | { |
337 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); | 337 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
338 | unsigned long iommu_page_index; | 338 | unsigned long iommu_page_index; |
@@ -355,7 +355,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, | |||
355 | 355 | ||
356 | static void *s390_dma_alloc(struct device *dev, size_t size, | 356 | static void *s390_dma_alloc(struct device *dev, size_t size, |
357 | dma_addr_t *dma_handle, gfp_t flag, | 357 | dma_addr_t *dma_handle, gfp_t flag, |
358 | struct dma_attrs *attrs) | 358 | unsigned long attrs) |
359 | { | 359 | { |
360 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); | 360 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
361 | struct page *page; | 361 | struct page *page; |
@@ -370,7 +370,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, | |||
370 | pa = page_to_phys(page); | 370 | pa = page_to_phys(page); |
371 | memset((void *) pa, 0, size); | 371 | memset((void *) pa, 0, size); |
372 | 372 | ||
373 | map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, NULL); | 373 | map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0); |
374 | if (dma_mapping_error(dev, map)) { | 374 | if (dma_mapping_error(dev, map)) { |
375 | free_pages(pa, get_order(size)); | 375 | free_pages(pa, get_order(size)); |
376 | return NULL; | 376 | return NULL; |
@@ -384,19 +384,19 @@ static void *s390_dma_alloc(struct device *dev, size_t size, | |||
384 | 384 | ||
385 | static void s390_dma_free(struct device *dev, size_t size, | 385 | static void s390_dma_free(struct device *dev, size_t size, |
386 | void *pa, dma_addr_t dma_handle, | 386 | void *pa, dma_addr_t dma_handle, |
387 | struct dma_attrs *attrs) | 387 | unsigned long attrs) |
388 | { | 388 | { |
389 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); | 389 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
390 | 390 | ||
391 | size = PAGE_ALIGN(size); | 391 | size = PAGE_ALIGN(size); |
392 | atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages); | 392 | atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages); |
393 | s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); | 393 | s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0); |
394 | free_pages((unsigned long) pa, get_order(size)); | 394 | free_pages((unsigned long) pa, get_order(size)); |
395 | } | 395 | } |
396 | 396 | ||
397 | static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, | 397 | static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, |
398 | int nr_elements, enum dma_data_direction dir, | 398 | int nr_elements, enum dma_data_direction dir, |
399 | struct dma_attrs *attrs) | 399 | unsigned long attrs) |
400 | { | 400 | { |
401 | int mapped_elements = 0; | 401 | int mapped_elements = 0; |
402 | struct scatterlist *s; | 402 | struct scatterlist *s; |
@@ -405,7 +405,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
405 | for_each_sg(sg, s, nr_elements, i) { | 405 | for_each_sg(sg, s, nr_elements, i) { |
406 | struct page *page = sg_page(s); | 406 | struct page *page = sg_page(s); |
407 | s->dma_address = s390_dma_map_pages(dev, page, s->offset, | 407 | s->dma_address = s390_dma_map_pages(dev, page, s->offset, |
408 | s->length, dir, NULL); | 408 | s->length, dir, 0); |
409 | if (!dma_mapping_error(dev, s->dma_address)) { | 409 | if (!dma_mapping_error(dev, s->dma_address)) { |
410 | s->dma_length = s->length; | 410 | s->dma_length = s->length; |
411 | mapped_elements++; | 411 | mapped_elements++; |
@@ -419,7 +419,7 @@ unmap: | |||
419 | for_each_sg(sg, s, mapped_elements, i) { | 419 | for_each_sg(sg, s, mapped_elements, i) { |
420 | if (s->dma_address) | 420 | if (s->dma_address) |
421 | s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, | 421 | s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, |
422 | dir, NULL); | 422 | dir, 0); |
423 | s->dma_address = 0; | 423 | s->dma_address = 0; |
424 | s->dma_length = 0; | 424 | s->dma_length = 0; |
425 | } | 425 | } |
@@ -429,13 +429,14 @@ unmap: | |||
429 | 429 | ||
430 | static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | 430 | static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
431 | int nr_elements, enum dma_data_direction dir, | 431 | int nr_elements, enum dma_data_direction dir, |
432 | struct dma_attrs *attrs) | 432 | unsigned long attrs) |
433 | { | 433 | { |
434 | struct scatterlist *s; | 434 | struct scatterlist *s; |
435 | int i; | 435 | int i; |
436 | 436 | ||
437 | for_each_sg(sg, s, nr_elements, i) { | 437 | for_each_sg(sg, s, nr_elements, i) { |
438 | s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL); | 438 | s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, |
439 | 0); | ||
439 | s->dma_address = 0; | 440 | s->dma_address = 0; |
440 | s->dma_length = 0; | 441 | s->dma_length = 0; |
441 | } | 442 | } |
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index e11cf0c8206b..0052ad40e86d 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h | |||
@@ -17,9 +17,9 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
17 | /* arch/sh/mm/consistent.c */ | 17 | /* arch/sh/mm/consistent.c */ |
18 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 18 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
19 | dma_addr_t *dma_addr, gfp_t flag, | 19 | dma_addr_t *dma_addr, gfp_t flag, |
20 | struct dma_attrs *attrs); | 20 | unsigned long attrs); |
21 | extern void dma_generic_free_coherent(struct device *dev, size_t size, | 21 | extern void dma_generic_free_coherent(struct device *dev, size_t size, |
22 | void *vaddr, dma_addr_t dma_handle, | 22 | void *vaddr, dma_addr_t dma_handle, |
23 | struct dma_attrs *attrs); | 23 | unsigned long attrs); |
24 | 24 | ||
25 | #endif /* __ASM_SH_DMA_MAPPING_H */ | 25 | #endif /* __ASM_SH_DMA_MAPPING_H */ |
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 5b0bfcda6d0b..eadb669a7329 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c | |||
@@ -13,7 +13,7 @@ | |||
13 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | 13 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, |
14 | unsigned long offset, size_t size, | 14 | unsigned long offset, size_t size, |
15 | enum dma_data_direction dir, | 15 | enum dma_data_direction dir, |
16 | struct dma_attrs *attrs) | 16 | unsigned long attrs) |
17 | { | 17 | { |
18 | dma_addr_t addr = page_to_phys(page) + offset; | 18 | dma_addr_t addr = page_to_phys(page) + offset; |
19 | 19 | ||
@@ -25,7 +25,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | |||
25 | 25 | ||
26 | static int nommu_map_sg(struct device *dev, struct scatterlist *sg, | 26 | static int nommu_map_sg(struct device *dev, struct scatterlist *sg, |
27 | int nents, enum dma_data_direction dir, | 27 | int nents, enum dma_data_direction dir, |
28 | struct dma_attrs *attrs) | 28 | unsigned long attrs) |
29 | { | 29 | { |
30 | struct scatterlist *s; | 30 | struct scatterlist *s; |
31 | int i; | 31 | int i; |
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index b81d9dbf9fef..92b6976fde59 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c | |||
@@ -34,7 +34,7 @@ fs_initcall(dma_init); | |||
34 | 34 | ||
35 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 35 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
36 | dma_addr_t *dma_handle, gfp_t gfp, | 36 | dma_addr_t *dma_handle, gfp_t gfp, |
37 | struct dma_attrs *attrs) | 37 | unsigned long attrs) |
38 | { | 38 | { |
39 | void *ret, *ret_nocache; | 39 | void *ret, *ret_nocache; |
40 | int order = get_order(size); | 40 | int order = get_order(size); |
@@ -66,7 +66,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
66 | 66 | ||
67 | void dma_generic_free_coherent(struct device *dev, size_t size, | 67 | void dma_generic_free_coherent(struct device *dev, size_t size, |
68 | void *vaddr, dma_addr_t dma_handle, | 68 | void *vaddr, dma_addr_t dma_handle, |
69 | struct dma_attrs *attrs) | 69 | unsigned long attrs) |
70 | { | 70 | { |
71 | int order = get_order(size); | 71 | int order = get_order(size); |
72 | unsigned long pfn = dma_handle >> PAGE_SHIFT; | 72 | unsigned long pfn = dma_handle >> PAGE_SHIFT; |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 37686828c3d9..5c615abff030 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -196,7 +196,7 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx) | |||
196 | 196 | ||
197 | static void *dma_4u_alloc_coherent(struct device *dev, size_t size, | 197 | static void *dma_4u_alloc_coherent(struct device *dev, size_t size, |
198 | dma_addr_t *dma_addrp, gfp_t gfp, | 198 | dma_addr_t *dma_addrp, gfp_t gfp, |
199 | struct dma_attrs *attrs) | 199 | unsigned long attrs) |
200 | { | 200 | { |
201 | unsigned long order, first_page; | 201 | unsigned long order, first_page; |
202 | struct iommu *iommu; | 202 | struct iommu *iommu; |
@@ -245,7 +245,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size, | |||
245 | 245 | ||
246 | static void dma_4u_free_coherent(struct device *dev, size_t size, | 246 | static void dma_4u_free_coherent(struct device *dev, size_t size, |
247 | void *cpu, dma_addr_t dvma, | 247 | void *cpu, dma_addr_t dvma, |
248 | struct dma_attrs *attrs) | 248 | unsigned long attrs) |
249 | { | 249 | { |
250 | struct iommu *iommu; | 250 | struct iommu *iommu; |
251 | unsigned long order, npages; | 251 | unsigned long order, npages; |
@@ -263,7 +263,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, | |||
263 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, | 263 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, |
264 | unsigned long offset, size_t sz, | 264 | unsigned long offset, size_t sz, |
265 | enum dma_data_direction direction, | 265 | enum dma_data_direction direction, |
266 | struct dma_attrs *attrs) | 266 | unsigned long attrs) |
267 | { | 267 | { |
268 | struct iommu *iommu; | 268 | struct iommu *iommu; |
269 | struct strbuf *strbuf; | 269 | struct strbuf *strbuf; |
@@ -385,7 +385,7 @@ do_flush_sync: | |||
385 | 385 | ||
386 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | 386 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, |
387 | size_t sz, enum dma_data_direction direction, | 387 | size_t sz, enum dma_data_direction direction, |
388 | struct dma_attrs *attrs) | 388 | unsigned long attrs) |
389 | { | 389 | { |
390 | struct iommu *iommu; | 390 | struct iommu *iommu; |
391 | struct strbuf *strbuf; | 391 | struct strbuf *strbuf; |
@@ -431,7 +431,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
431 | 431 | ||
432 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, | 432 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, |
433 | int nelems, enum dma_data_direction direction, | 433 | int nelems, enum dma_data_direction direction, |
434 | struct dma_attrs *attrs) | 434 | unsigned long attrs) |
435 | { | 435 | { |
436 | struct scatterlist *s, *outs, *segstart; | 436 | struct scatterlist *s, *outs, *segstart; |
437 | unsigned long flags, handle, prot, ctx; | 437 | unsigned long flags, handle, prot, ctx; |
@@ -607,7 +607,7 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) | |||
607 | 607 | ||
608 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, | 608 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, |
609 | int nelems, enum dma_data_direction direction, | 609 | int nelems, enum dma_data_direction direction, |
610 | struct dma_attrs *attrs) | 610 | unsigned long attrs) |
611 | { | 611 | { |
612 | unsigned long flags, ctx; | 612 | unsigned long flags, ctx; |
613 | struct scatterlist *sg; | 613 | struct scatterlist *sg; |
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index ffd5ff4678cf..2344103414d1 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
@@ -260,7 +260,7 @@ EXPORT_SYMBOL(sbus_set_sbus64); | |||
260 | */ | 260 | */ |
261 | static void *sbus_alloc_coherent(struct device *dev, size_t len, | 261 | static void *sbus_alloc_coherent(struct device *dev, size_t len, |
262 | dma_addr_t *dma_addrp, gfp_t gfp, | 262 | dma_addr_t *dma_addrp, gfp_t gfp, |
263 | struct dma_attrs *attrs) | 263 | unsigned long attrs) |
264 | { | 264 | { |
265 | struct platform_device *op = to_platform_device(dev); | 265 | struct platform_device *op = to_platform_device(dev); |
266 | unsigned long len_total = PAGE_ALIGN(len); | 266 | unsigned long len_total = PAGE_ALIGN(len); |
@@ -315,7 +315,7 @@ err_nopages: | |||
315 | } | 315 | } |
316 | 316 | ||
317 | static void sbus_free_coherent(struct device *dev, size_t n, void *p, | 317 | static void sbus_free_coherent(struct device *dev, size_t n, void *p, |
318 | dma_addr_t ba, struct dma_attrs *attrs) | 318 | dma_addr_t ba, unsigned long attrs) |
319 | { | 319 | { |
320 | struct resource *res; | 320 | struct resource *res; |
321 | struct page *pgv; | 321 | struct page *pgv; |
@@ -355,7 +355,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p, | |||
355 | static dma_addr_t sbus_map_page(struct device *dev, struct page *page, | 355 | static dma_addr_t sbus_map_page(struct device *dev, struct page *page, |
356 | unsigned long offset, size_t len, | 356 | unsigned long offset, size_t len, |
357 | enum dma_data_direction dir, | 357 | enum dma_data_direction dir, |
358 | struct dma_attrs *attrs) | 358 | unsigned long attrs) |
359 | { | 359 | { |
360 | void *va = page_address(page) + offset; | 360 | void *va = page_address(page) + offset; |
361 | 361 | ||
@@ -371,20 +371,20 @@ static dma_addr_t sbus_map_page(struct device *dev, struct page *page, | |||
371 | } | 371 | } |
372 | 372 | ||
373 | static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, | 373 | static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, |
374 | enum dma_data_direction dir, struct dma_attrs *attrs) | 374 | enum dma_data_direction dir, unsigned long attrs) |
375 | { | 375 | { |
376 | mmu_release_scsi_one(dev, ba, n); | 376 | mmu_release_scsi_one(dev, ba, n); |
377 | } | 377 | } |
378 | 378 | ||
379 | static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, | 379 | static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, |
380 | enum dma_data_direction dir, struct dma_attrs *attrs) | 380 | enum dma_data_direction dir, unsigned long attrs) |
381 | { | 381 | { |
382 | mmu_get_scsi_sgl(dev, sg, n); | 382 | mmu_get_scsi_sgl(dev, sg, n); |
383 | return n; | 383 | return n; |
384 | } | 384 | } |
385 | 385 | ||
386 | static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, | 386 | static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, |
387 | enum dma_data_direction dir, struct dma_attrs *attrs) | 387 | enum dma_data_direction dir, unsigned long attrs) |
388 | { | 388 | { |
389 | mmu_release_scsi_sgl(dev, sg, n); | 389 | mmu_release_scsi_sgl(dev, sg, n); |
390 | } | 390 | } |
@@ -429,7 +429,7 @@ arch_initcall(sparc_register_ioport); | |||
429 | */ | 429 | */ |
430 | static void *pci32_alloc_coherent(struct device *dev, size_t len, | 430 | static void *pci32_alloc_coherent(struct device *dev, size_t len, |
431 | dma_addr_t *pba, gfp_t gfp, | 431 | dma_addr_t *pba, gfp_t gfp, |
432 | struct dma_attrs *attrs) | 432 | unsigned long attrs) |
433 | { | 433 | { |
434 | unsigned long len_total = PAGE_ALIGN(len); | 434 | unsigned long len_total = PAGE_ALIGN(len); |
435 | void *va; | 435 | void *va; |
@@ -482,7 +482,7 @@ err_nopages: | |||
482 | * past this call are illegal. | 482 | * past this call are illegal. |
483 | */ | 483 | */ |
484 | static void pci32_free_coherent(struct device *dev, size_t n, void *p, | 484 | static void pci32_free_coherent(struct device *dev, size_t n, void *p, |
485 | dma_addr_t ba, struct dma_attrs *attrs) | 485 | dma_addr_t ba, unsigned long attrs) |
486 | { | 486 | { |
487 | struct resource *res; | 487 | struct resource *res; |
488 | 488 | ||
@@ -518,14 +518,14 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p, | |||
518 | static dma_addr_t pci32_map_page(struct device *dev, struct page *page, | 518 | static dma_addr_t pci32_map_page(struct device *dev, struct page *page, |
519 | unsigned long offset, size_t size, | 519 | unsigned long offset, size_t size, |
520 | enum dma_data_direction dir, | 520 | enum dma_data_direction dir, |
521 | struct dma_attrs *attrs) | 521 | unsigned long attrs) |
522 | { | 522 | { |
523 | /* IIep is write-through, not flushing. */ | 523 | /* IIep is write-through, not flushing. */ |
524 | return page_to_phys(page) + offset; | 524 | return page_to_phys(page) + offset; |
525 | } | 525 | } |
526 | 526 | ||
527 | static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, | 527 | static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, |
528 | enum dma_data_direction dir, struct dma_attrs *attrs) | 528 | enum dma_data_direction dir, unsigned long attrs) |
529 | { | 529 | { |
530 | if (dir != PCI_DMA_TODEVICE) | 530 | if (dir != PCI_DMA_TODEVICE) |
531 | dma_make_coherent(ba, PAGE_ALIGN(size)); | 531 | dma_make_coherent(ba, PAGE_ALIGN(size)); |
@@ -548,7 +548,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, | |||
548 | */ | 548 | */ |
549 | static int pci32_map_sg(struct device *device, struct scatterlist *sgl, | 549 | static int pci32_map_sg(struct device *device, struct scatterlist *sgl, |
550 | int nents, enum dma_data_direction dir, | 550 | int nents, enum dma_data_direction dir, |
551 | struct dma_attrs *attrs) | 551 | unsigned long attrs) |
552 | { | 552 | { |
553 | struct scatterlist *sg; | 553 | struct scatterlist *sg; |
554 | int n; | 554 | int n; |
@@ -567,7 +567,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl, | |||
567 | */ | 567 | */ |
568 | static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, | 568 | static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, |
569 | int nents, enum dma_data_direction dir, | 569 | int nents, enum dma_data_direction dir, |
570 | struct dma_attrs *attrs) | 570 | unsigned long attrs) |
571 | { | 571 | { |
572 | struct scatterlist *sg; | 572 | struct scatterlist *sg; |
573 | int n; | 573 | int n; |
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 836e8cef47e2..61c6f935accc 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -130,7 +130,7 @@ static inline long iommu_batch_end(void) | |||
130 | 130 | ||
131 | static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | 131 | static void *dma_4v_alloc_coherent(struct device *dev, size_t size, |
132 | dma_addr_t *dma_addrp, gfp_t gfp, | 132 | dma_addr_t *dma_addrp, gfp_t gfp, |
133 | struct dma_attrs *attrs) | 133 | unsigned long attrs) |
134 | { | 134 | { |
135 | unsigned long flags, order, first_page, npages, n; | 135 | unsigned long flags, order, first_page, npages, n; |
136 | struct iommu *iommu; | 136 | struct iommu *iommu; |
@@ -213,7 +213,7 @@ static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, | |||
213 | } | 213 | } |
214 | 214 | ||
215 | static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | 215 | static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, |
216 | dma_addr_t dvma, struct dma_attrs *attrs) | 216 | dma_addr_t dvma, unsigned long attrs) |
217 | { | 217 | { |
218 | struct pci_pbm_info *pbm; | 218 | struct pci_pbm_info *pbm; |
219 | struct iommu *iommu; | 219 | struct iommu *iommu; |
@@ -235,7 +235,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | |||
235 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, | 235 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, |
236 | unsigned long offset, size_t sz, | 236 | unsigned long offset, size_t sz, |
237 | enum dma_data_direction direction, | 237 | enum dma_data_direction direction, |
238 | struct dma_attrs *attrs) | 238 | unsigned long attrs) |
239 | { | 239 | { |
240 | struct iommu *iommu; | 240 | struct iommu *iommu; |
241 | unsigned long flags, npages, oaddr; | 241 | unsigned long flags, npages, oaddr; |
@@ -294,7 +294,7 @@ iommu_map_fail: | |||
294 | 294 | ||
295 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | 295 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, |
296 | size_t sz, enum dma_data_direction direction, | 296 | size_t sz, enum dma_data_direction direction, |
297 | struct dma_attrs *attrs) | 297 | unsigned long attrs) |
298 | { | 298 | { |
299 | struct pci_pbm_info *pbm; | 299 | struct pci_pbm_info *pbm; |
300 | struct iommu *iommu; | 300 | struct iommu *iommu; |
@@ -322,7 +322,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
322 | 322 | ||
323 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | 323 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, |
324 | int nelems, enum dma_data_direction direction, | 324 | int nelems, enum dma_data_direction direction, |
325 | struct dma_attrs *attrs) | 325 | unsigned long attrs) |
326 | { | 326 | { |
327 | struct scatterlist *s, *outs, *segstart; | 327 | struct scatterlist *s, *outs, *segstart; |
328 | unsigned long flags, handle, prot; | 328 | unsigned long flags, handle, prot; |
@@ -466,7 +466,7 @@ iommu_map_failed: | |||
466 | 466 | ||
467 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | 467 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, |
468 | int nelems, enum dma_data_direction direction, | 468 | int nelems, enum dma_data_direction direction, |
469 | struct dma_attrs *attrs) | 469 | unsigned long attrs) |
470 | { | 470 | { |
471 | struct pci_pbm_info *pbm; | 471 | struct pci_pbm_info *pbm; |
472 | struct scatterlist *sg; | 472 | struct scatterlist *sg; |
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index b6bc0547a4f6..09bb774b39cd 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c | |||
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | static void *tile_dma_alloc_coherent(struct device *dev, size_t size, | 35 | static void *tile_dma_alloc_coherent(struct device *dev, size_t size, |
36 | dma_addr_t *dma_handle, gfp_t gfp, | 36 | dma_addr_t *dma_handle, gfp_t gfp, |
37 | struct dma_attrs *attrs) | 37 | unsigned long attrs) |
38 | { | 38 | { |
39 | u64 dma_mask = (dev && dev->coherent_dma_mask) ? | 39 | u64 dma_mask = (dev && dev->coherent_dma_mask) ? |
40 | dev->coherent_dma_mask : DMA_BIT_MASK(32); | 40 | dev->coherent_dma_mask : DMA_BIT_MASK(32); |
@@ -78,7 +78,7 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size, | |||
78 | */ | 78 | */ |
79 | static void tile_dma_free_coherent(struct device *dev, size_t size, | 79 | static void tile_dma_free_coherent(struct device *dev, size_t size, |
80 | void *vaddr, dma_addr_t dma_handle, | 80 | void *vaddr, dma_addr_t dma_handle, |
81 | struct dma_attrs *attrs) | 81 | unsigned long attrs) |
82 | { | 82 | { |
83 | homecache_free_pages((unsigned long)vaddr, get_order(size)); | 83 | homecache_free_pages((unsigned long)vaddr, get_order(size)); |
84 | } | 84 | } |
@@ -202,7 +202,7 @@ static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size, | |||
202 | 202 | ||
203 | static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 203 | static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
204 | int nents, enum dma_data_direction direction, | 204 | int nents, enum dma_data_direction direction, |
205 | struct dma_attrs *attrs) | 205 | unsigned long attrs) |
206 | { | 206 | { |
207 | struct scatterlist *sg; | 207 | struct scatterlist *sg; |
208 | int i; | 208 | int i; |
@@ -224,7 +224,7 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
224 | 224 | ||
225 | static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 225 | static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
226 | int nents, enum dma_data_direction direction, | 226 | int nents, enum dma_data_direction direction, |
227 | struct dma_attrs *attrs) | 227 | unsigned long attrs) |
228 | { | 228 | { |
229 | struct scatterlist *sg; | 229 | struct scatterlist *sg; |
230 | int i; | 230 | int i; |
@@ -240,7 +240,7 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
240 | static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, | 240 | static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, |
241 | unsigned long offset, size_t size, | 241 | unsigned long offset, size_t size, |
242 | enum dma_data_direction direction, | 242 | enum dma_data_direction direction, |
243 | struct dma_attrs *attrs) | 243 | unsigned long attrs) |
244 | { | 244 | { |
245 | BUG_ON(!valid_dma_direction(direction)); | 245 | BUG_ON(!valid_dma_direction(direction)); |
246 | 246 | ||
@@ -252,7 +252,7 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, | |||
252 | 252 | ||
253 | static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 253 | static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
254 | size_t size, enum dma_data_direction direction, | 254 | size_t size, enum dma_data_direction direction, |
255 | struct dma_attrs *attrs) | 255 | unsigned long attrs) |
256 | { | 256 | { |
257 | BUG_ON(!valid_dma_direction(direction)); | 257 | BUG_ON(!valid_dma_direction(direction)); |
258 | 258 | ||
@@ -343,7 +343,7 @@ EXPORT_SYMBOL(tile_dma_map_ops); | |||
343 | 343 | ||
344 | static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, | 344 | static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, |
345 | dma_addr_t *dma_handle, gfp_t gfp, | 345 | dma_addr_t *dma_handle, gfp_t gfp, |
346 | struct dma_attrs *attrs) | 346 | unsigned long attrs) |
347 | { | 347 | { |
348 | int node = dev_to_node(dev); | 348 | int node = dev_to_node(dev); |
349 | int order = get_order(size); | 349 | int order = get_order(size); |
@@ -368,14 +368,14 @@ static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, | |||
368 | */ | 368 | */ |
369 | static void tile_pci_dma_free_coherent(struct device *dev, size_t size, | 369 | static void tile_pci_dma_free_coherent(struct device *dev, size_t size, |
370 | void *vaddr, dma_addr_t dma_handle, | 370 | void *vaddr, dma_addr_t dma_handle, |
371 | struct dma_attrs *attrs) | 371 | unsigned long attrs) |
372 | { | 372 | { |
373 | homecache_free_pages((unsigned long)vaddr, get_order(size)); | 373 | homecache_free_pages((unsigned long)vaddr, get_order(size)); |
374 | } | 374 | } |
375 | 375 | ||
376 | static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 376 | static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
377 | int nents, enum dma_data_direction direction, | 377 | int nents, enum dma_data_direction direction, |
378 | struct dma_attrs *attrs) | 378 | unsigned long attrs) |
379 | { | 379 | { |
380 | struct scatterlist *sg; | 380 | struct scatterlist *sg; |
381 | int i; | 381 | int i; |
@@ -400,7 +400,7 @@ static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
400 | static void tile_pci_dma_unmap_sg(struct device *dev, | 400 | static void tile_pci_dma_unmap_sg(struct device *dev, |
401 | struct scatterlist *sglist, int nents, | 401 | struct scatterlist *sglist, int nents, |
402 | enum dma_data_direction direction, | 402 | enum dma_data_direction direction, |
403 | struct dma_attrs *attrs) | 403 | unsigned long attrs) |
404 | { | 404 | { |
405 | struct scatterlist *sg; | 405 | struct scatterlist *sg; |
406 | int i; | 406 | int i; |
@@ -416,7 +416,7 @@ static void tile_pci_dma_unmap_sg(struct device *dev, | |||
416 | static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, | 416 | static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, |
417 | unsigned long offset, size_t size, | 417 | unsigned long offset, size_t size, |
418 | enum dma_data_direction direction, | 418 | enum dma_data_direction direction, |
419 | struct dma_attrs *attrs) | 419 | unsigned long attrs) |
420 | { | 420 | { |
421 | BUG_ON(!valid_dma_direction(direction)); | 421 | BUG_ON(!valid_dma_direction(direction)); |
422 | 422 | ||
@@ -429,7 +429,7 @@ static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, | |||
429 | static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 429 | static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
430 | size_t size, | 430 | size_t size, |
431 | enum dma_data_direction direction, | 431 | enum dma_data_direction direction, |
432 | struct dma_attrs *attrs) | 432 | unsigned long attrs) |
433 | { | 433 | { |
434 | BUG_ON(!valid_dma_direction(direction)); | 434 | BUG_ON(!valid_dma_direction(direction)); |
435 | 435 | ||
@@ -531,7 +531,7 @@ EXPORT_SYMBOL(gx_pci_dma_map_ops); | |||
531 | #ifdef CONFIG_SWIOTLB | 531 | #ifdef CONFIG_SWIOTLB |
532 | static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, | 532 | static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, |
533 | dma_addr_t *dma_handle, gfp_t gfp, | 533 | dma_addr_t *dma_handle, gfp_t gfp, |
534 | struct dma_attrs *attrs) | 534 | unsigned long attrs) |
535 | { | 535 | { |
536 | gfp |= GFP_DMA; | 536 | gfp |= GFP_DMA; |
537 | return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); | 537 | return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); |
@@ -539,7 +539,7 @@ static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, | |||
539 | 539 | ||
540 | static void tile_swiotlb_free_coherent(struct device *dev, size_t size, | 540 | static void tile_swiotlb_free_coherent(struct device *dev, size_t size, |
541 | void *vaddr, dma_addr_t dma_addr, | 541 | void *vaddr, dma_addr_t dma_addr, |
542 | struct dma_attrs *attrs) | 542 | unsigned long attrs) |
543 | { | 543 | { |
544 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); | 544 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); |
545 | } | 545 | } |
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c index 16c08b2143a7..3e9f6489ba38 100644 --- a/arch/unicore32/mm/dma-swiotlb.c +++ b/arch/unicore32/mm/dma-swiotlb.c | |||
@@ -19,14 +19,14 @@ | |||
19 | 19 | ||
20 | static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size, | 20 | static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size, |
21 | dma_addr_t *dma_handle, gfp_t flags, | 21 | dma_addr_t *dma_handle, gfp_t flags, |
22 | struct dma_attrs *attrs) | 22 | unsigned long attrs) |
23 | { | 23 | { |
24 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); | 24 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); |
25 | } | 25 | } |
26 | 26 | ||
27 | static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, | 27 | static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, |
28 | void *vaddr, dma_addr_t dma_addr, | 28 | void *vaddr, dma_addr_t dma_addr, |
29 | struct dma_attrs *attrs) | 29 | unsigned long attrs) |
30 | { | 30 | { |
31 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); | 31 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); |
32 | } | 32 | } |
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 3a27b93e6261..44461626830e 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/kmemcheck.h> | 9 | #include <linux/kmemcheck.h> |
10 | #include <linux/scatterlist.h> | 10 | #include <linux/scatterlist.h> |
11 | #include <linux/dma-debug.h> | 11 | #include <linux/dma-debug.h> |
12 | #include <linux/dma-attrs.h> | ||
13 | #include <asm/io.h> | 12 | #include <asm/io.h> |
14 | #include <asm/swiotlb.h> | 13 | #include <asm/swiotlb.h> |
15 | #include <linux/dma-contiguous.h> | 14 | #include <linux/dma-contiguous.h> |
@@ -48,11 +47,11 @@ extern int dma_supported(struct device *hwdev, u64 mask); | |||
48 | 47 | ||
49 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 48 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
50 | dma_addr_t *dma_addr, gfp_t flag, | 49 | dma_addr_t *dma_addr, gfp_t flag, |
51 | struct dma_attrs *attrs); | 50 | unsigned long attrs); |
52 | 51 | ||
53 | extern void dma_generic_free_coherent(struct device *dev, size_t size, | 52 | extern void dma_generic_free_coherent(struct device *dev, size_t size, |
54 | void *vaddr, dma_addr_t dma_addr, | 53 | void *vaddr, dma_addr_t dma_addr, |
55 | struct dma_attrs *attrs); | 54 | unsigned long attrs); |
56 | 55 | ||
57 | #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ | 56 | #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ |
58 | extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); | 57 | extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); |
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index ab05d73e2bb7..d2f69b9ff732 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h | |||
@@ -31,9 +31,9 @@ static inline void dma_mark_clean(void *addr, size_t size) {} | |||
31 | 31 | ||
32 | extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 32 | extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
33 | dma_addr_t *dma_handle, gfp_t flags, | 33 | dma_addr_t *dma_handle, gfp_t flags, |
34 | struct dma_attrs *attrs); | 34 | unsigned long attrs); |
35 | extern void x86_swiotlb_free_coherent(struct device *dev, size_t size, | 35 | extern void x86_swiotlb_free_coherent(struct device *dev, size_t size, |
36 | void *vaddr, dma_addr_t dma_addr, | 36 | void *vaddr, dma_addr_t dma_addr, |
37 | struct dma_attrs *attrs); | 37 | unsigned long attrs); |
38 | 38 | ||
39 | #endif /* _ASM_X86_SWIOTLB_H */ | 39 | #endif /* _ASM_X86_SWIOTLB_H */ |
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h index acd844c017d3..f02f025ff988 100644 --- a/arch/x86/include/asm/xen/page-coherent.h +++ b/arch/x86/include/asm/xen/page-coherent.h | |||
@@ -2,12 +2,11 @@ | |||
2 | #define _ASM_X86_XEN_PAGE_COHERENT_H | 2 | #define _ASM_X86_XEN_PAGE_COHERENT_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | #include <linux/dma-attrs.h> | ||
6 | #include <linux/dma-mapping.h> | 5 | #include <linux/dma-mapping.h> |
7 | 6 | ||
8 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | 7 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, |
9 | dma_addr_t *dma_handle, gfp_t flags, | 8 | dma_addr_t *dma_handle, gfp_t flags, |
10 | struct dma_attrs *attrs) | 9 | unsigned long attrs) |
11 | { | 10 | { |
12 | void *vstart = (void*)__get_free_pages(flags, get_order(size)); | 11 | void *vstart = (void*)__get_free_pages(flags, get_order(size)); |
13 | *dma_handle = virt_to_phys(vstart); | 12 | *dma_handle = virt_to_phys(vstart); |
@@ -16,18 +15,18 @@ static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | |||
16 | 15 | ||
17 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, | 16 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, |
18 | void *cpu_addr, dma_addr_t dma_handle, | 17 | void *cpu_addr, dma_addr_t dma_handle, |
19 | struct dma_attrs *attrs) | 18 | unsigned long attrs) |
20 | { | 19 | { |
21 | free_pages((unsigned long) cpu_addr, get_order(size)); | 20 | free_pages((unsigned long) cpu_addr, get_order(size)); |
22 | } | 21 | } |
23 | 22 | ||
24 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | 23 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, |
25 | dma_addr_t dev_addr, unsigned long offset, size_t size, | 24 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
26 | enum dma_data_direction dir, struct dma_attrs *attrs) { } | 25 | enum dma_data_direction dir, unsigned long attrs) { } |
27 | 26 | ||
28 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 27 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
29 | size_t size, enum dma_data_direction dir, | 28 | size_t size, enum dma_data_direction dir, |
30 | struct dma_attrs *attrs) { } | 29 | unsigned long attrs) { } |
31 | 30 | ||
32 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, | 31 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, |
33 | dma_addr_t handle, size_t size, enum dma_data_direction dir) { } | 32 | dma_addr_t handle, size_t size, enum dma_data_direction dir) { } |
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 42d27a62a404..63ff468a7986 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c | |||
@@ -241,7 +241,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, | |||
241 | static dma_addr_t gart_map_page(struct device *dev, struct page *page, | 241 | static dma_addr_t gart_map_page(struct device *dev, struct page *page, |
242 | unsigned long offset, size_t size, | 242 | unsigned long offset, size_t size, |
243 | enum dma_data_direction dir, | 243 | enum dma_data_direction dir, |
244 | struct dma_attrs *attrs) | 244 | unsigned long attrs) |
245 | { | 245 | { |
246 | unsigned long bus; | 246 | unsigned long bus; |
247 | phys_addr_t paddr = page_to_phys(page) + offset; | 247 | phys_addr_t paddr = page_to_phys(page) + offset; |
@@ -263,7 +263,7 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page, | |||
263 | */ | 263 | */ |
264 | static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, | 264 | static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, |
265 | size_t size, enum dma_data_direction dir, | 265 | size_t size, enum dma_data_direction dir, |
266 | struct dma_attrs *attrs) | 266 | unsigned long attrs) |
267 | { | 267 | { |
268 | unsigned long iommu_page; | 268 | unsigned long iommu_page; |
269 | int npages; | 269 | int npages; |
@@ -285,7 +285,7 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
285 | * Wrapper for pci_unmap_single working with scatterlists. | 285 | * Wrapper for pci_unmap_single working with scatterlists. |
286 | */ | 286 | */ |
287 | static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 287 | static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
288 | enum dma_data_direction dir, struct dma_attrs *attrs) | 288 | enum dma_data_direction dir, unsigned long attrs) |
289 | { | 289 | { |
290 | struct scatterlist *s; | 290 | struct scatterlist *s; |
291 | int i; | 291 | int i; |
@@ -293,7 +293,7 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
293 | for_each_sg(sg, s, nents, i) { | 293 | for_each_sg(sg, s, nents, i) { |
294 | if (!s->dma_length || !s->length) | 294 | if (!s->dma_length || !s->length) |
295 | break; | 295 | break; |
296 | gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL); | 296 | gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0); |
297 | } | 297 | } |
298 | } | 298 | } |
299 | 299 | ||
@@ -315,7 +315,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, | |||
315 | addr = dma_map_area(dev, addr, s->length, dir, 0); | 315 | addr = dma_map_area(dev, addr, s->length, dir, 0); |
316 | if (addr == bad_dma_addr) { | 316 | if (addr == bad_dma_addr) { |
317 | if (i > 0) | 317 | if (i > 0) |
318 | gart_unmap_sg(dev, sg, i, dir, NULL); | 318 | gart_unmap_sg(dev, sg, i, dir, 0); |
319 | nents = 0; | 319 | nents = 0; |
320 | sg[0].dma_length = 0; | 320 | sg[0].dma_length = 0; |
321 | break; | 321 | break; |
@@ -386,7 +386,7 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems, | |||
386 | * Merge chunks that have page aligned sizes into a continuous mapping. | 386 | * Merge chunks that have page aligned sizes into a continuous mapping. |
387 | */ | 387 | */ |
388 | static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 388 | static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
389 | enum dma_data_direction dir, struct dma_attrs *attrs) | 389 | enum dma_data_direction dir, unsigned long attrs) |
390 | { | 390 | { |
391 | struct scatterlist *s, *ps, *start_sg, *sgmap; | 391 | struct scatterlist *s, *ps, *start_sg, *sgmap; |
392 | int need = 0, nextneed, i, out, start; | 392 | int need = 0, nextneed, i, out, start; |
@@ -456,7 +456,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
456 | 456 | ||
457 | error: | 457 | error: |
458 | flush_gart(); | 458 | flush_gart(); |
459 | gart_unmap_sg(dev, sg, out, dir, NULL); | 459 | gart_unmap_sg(dev, sg, out, dir, 0); |
460 | 460 | ||
461 | /* When it was forced or merged try again in a dumb way */ | 461 | /* When it was forced or merged try again in a dumb way */ |
462 | if (force_iommu || iommu_merge) { | 462 | if (force_iommu || iommu_merge) { |
@@ -476,7 +476,7 @@ error: | |||
476 | /* allocate and map a coherent mapping */ | 476 | /* allocate and map a coherent mapping */ |
477 | static void * | 477 | static void * |
478 | gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, | 478 | gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, |
479 | gfp_t flag, struct dma_attrs *attrs) | 479 | gfp_t flag, unsigned long attrs) |
480 | { | 480 | { |
481 | dma_addr_t paddr; | 481 | dma_addr_t paddr; |
482 | unsigned long align_mask; | 482 | unsigned long align_mask; |
@@ -508,9 +508,9 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, | |||
508 | /* free a coherent mapping */ | 508 | /* free a coherent mapping */ |
509 | static void | 509 | static void |
510 | gart_free_coherent(struct device *dev, size_t size, void *vaddr, | 510 | gart_free_coherent(struct device *dev, size_t size, void *vaddr, |
511 | dma_addr_t dma_addr, struct dma_attrs *attrs) | 511 | dma_addr_t dma_addr, unsigned long attrs) |
512 | { | 512 | { |
513 | gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); | 513 | gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); |
514 | dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); | 514 | dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); |
515 | } | 515 | } |
516 | 516 | ||
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 833b1d329c47..5d400ba1349d 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c | |||
@@ -340,7 +340,7 @@ static inline struct iommu_table *find_iommu_table(struct device *dev) | |||
340 | 340 | ||
341 | static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, | 341 | static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, |
342 | int nelems,enum dma_data_direction dir, | 342 | int nelems,enum dma_data_direction dir, |
343 | struct dma_attrs *attrs) | 343 | unsigned long attrs) |
344 | { | 344 | { |
345 | struct iommu_table *tbl = find_iommu_table(dev); | 345 | struct iommu_table *tbl = find_iommu_table(dev); |
346 | struct scatterlist *s; | 346 | struct scatterlist *s; |
@@ -364,7 +364,7 @@ static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
364 | 364 | ||
365 | static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | 365 | static int calgary_map_sg(struct device *dev, struct scatterlist *sg, |
366 | int nelems, enum dma_data_direction dir, | 366 | int nelems, enum dma_data_direction dir, |
367 | struct dma_attrs *attrs) | 367 | unsigned long attrs) |
368 | { | 368 | { |
369 | struct iommu_table *tbl = find_iommu_table(dev); | 369 | struct iommu_table *tbl = find_iommu_table(dev); |
370 | struct scatterlist *s; | 370 | struct scatterlist *s; |
@@ -396,7 +396,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | |||
396 | 396 | ||
397 | return nelems; | 397 | return nelems; |
398 | error: | 398 | error: |
399 | calgary_unmap_sg(dev, sg, nelems, dir, NULL); | 399 | calgary_unmap_sg(dev, sg, nelems, dir, 0); |
400 | for_each_sg(sg, s, nelems, i) { | 400 | for_each_sg(sg, s, nelems, i) { |
401 | sg->dma_address = DMA_ERROR_CODE; | 401 | sg->dma_address = DMA_ERROR_CODE; |
402 | sg->dma_length = 0; | 402 | sg->dma_length = 0; |
@@ -407,7 +407,7 @@ error: | |||
407 | static dma_addr_t calgary_map_page(struct device *dev, struct page *page, | 407 | static dma_addr_t calgary_map_page(struct device *dev, struct page *page, |
408 | unsigned long offset, size_t size, | 408 | unsigned long offset, size_t size, |
409 | enum dma_data_direction dir, | 409 | enum dma_data_direction dir, |
410 | struct dma_attrs *attrs) | 410 | unsigned long attrs) |
411 | { | 411 | { |
412 | void *vaddr = page_address(page) + offset; | 412 | void *vaddr = page_address(page) + offset; |
413 | unsigned long uaddr; | 413 | unsigned long uaddr; |
@@ -422,7 +422,7 @@ static dma_addr_t calgary_map_page(struct device *dev, struct page *page, | |||
422 | 422 | ||
423 | static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, | 423 | static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, |
424 | size_t size, enum dma_data_direction dir, | 424 | size_t size, enum dma_data_direction dir, |
425 | struct dma_attrs *attrs) | 425 | unsigned long attrs) |
426 | { | 426 | { |
427 | struct iommu_table *tbl = find_iommu_table(dev); | 427 | struct iommu_table *tbl = find_iommu_table(dev); |
428 | unsigned int npages; | 428 | unsigned int npages; |
@@ -432,7 +432,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
432 | } | 432 | } |
433 | 433 | ||
434 | static void* calgary_alloc_coherent(struct device *dev, size_t size, | 434 | static void* calgary_alloc_coherent(struct device *dev, size_t size, |
435 | dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) | 435 | dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) |
436 | { | 436 | { |
437 | void *ret = NULL; | 437 | void *ret = NULL; |
438 | dma_addr_t mapping; | 438 | dma_addr_t mapping; |
@@ -466,7 +466,7 @@ error: | |||
466 | 466 | ||
467 | static void calgary_free_coherent(struct device *dev, size_t size, | 467 | static void calgary_free_coherent(struct device *dev, size_t size, |
468 | void *vaddr, dma_addr_t dma_handle, | 468 | void *vaddr, dma_addr_t dma_handle, |
469 | struct dma_attrs *attrs) | 469 | unsigned long attrs) |
470 | { | 470 | { |
471 | unsigned int npages; | 471 | unsigned int npages; |
472 | struct iommu_table *tbl = find_iommu_table(dev); | 472 | struct iommu_table *tbl = find_iommu_table(dev); |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 6ba014c61d62..d30c37750765 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -77,7 +77,7 @@ void __init pci_iommu_alloc(void) | |||
77 | } | 77 | } |
78 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 78 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
79 | dma_addr_t *dma_addr, gfp_t flag, | 79 | dma_addr_t *dma_addr, gfp_t flag, |
80 | struct dma_attrs *attrs) | 80 | unsigned long attrs) |
81 | { | 81 | { |
82 | unsigned long dma_mask; | 82 | unsigned long dma_mask; |
83 | struct page *page; | 83 | struct page *page; |
@@ -120,7 +120,7 @@ again: | |||
120 | } | 120 | } |
121 | 121 | ||
122 | void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, | 122 | void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, |
123 | dma_addr_t dma_addr, struct dma_attrs *attrs) | 123 | dma_addr_t dma_addr, unsigned long attrs) |
124 | { | 124 | { |
125 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 125 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
126 | struct page *page = virt_to_page(vaddr); | 126 | struct page *page = virt_to_page(vaddr); |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index da15918d1c81..00e71ce396a8 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -28,7 +28,7 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) | |||
28 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | 28 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, |
29 | unsigned long offset, size_t size, | 29 | unsigned long offset, size_t size, |
30 | enum dma_data_direction dir, | 30 | enum dma_data_direction dir, |
31 | struct dma_attrs *attrs) | 31 | unsigned long attrs) |
32 | { | 32 | { |
33 | dma_addr_t bus = page_to_phys(page) + offset; | 33 | dma_addr_t bus = page_to_phys(page) + offset; |
34 | WARN_ON(size == 0); | 34 | WARN_ON(size == 0); |
@@ -55,7 +55,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | |||
55 | */ | 55 | */ |
56 | static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, | 56 | static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, |
57 | int nents, enum dma_data_direction dir, | 57 | int nents, enum dma_data_direction dir, |
58 | struct dma_attrs *attrs) | 58 | unsigned long attrs) |
59 | { | 59 | { |
60 | struct scatterlist *s; | 60 | struct scatterlist *s; |
61 | int i; | 61 | int i; |
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 5069ef560d83..b47edb8f5256 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c | |||
@@ -16,7 +16,7 @@ int swiotlb __read_mostly; | |||
16 | 16 | ||
17 | void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 17 | void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
18 | dma_addr_t *dma_handle, gfp_t flags, | 18 | dma_addr_t *dma_handle, gfp_t flags, |
19 | struct dma_attrs *attrs) | 19 | unsigned long attrs) |
20 | { | 20 | { |
21 | void *vaddr; | 21 | void *vaddr; |
22 | 22 | ||
@@ -37,7 +37,7 @@ void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
37 | 37 | ||
38 | void x86_swiotlb_free_coherent(struct device *dev, size_t size, | 38 | void x86_swiotlb_free_coherent(struct device *dev, size_t size, |
39 | void *vaddr, dma_addr_t dma_addr, | 39 | void *vaddr, dma_addr_t dma_addr, |
40 | struct dma_attrs *attrs) | 40 | unsigned long attrs) |
41 | { | 41 | { |
42 | if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr))) | 42 | if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr))) |
43 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); | 43 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); |
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 5ceda85b8687..052c1cb76305 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c | |||
@@ -169,7 +169,7 @@ static void *sta2x11_swiotlb_alloc_coherent(struct device *dev, | |||
169 | size_t size, | 169 | size_t size, |
170 | dma_addr_t *dma_handle, | 170 | dma_addr_t *dma_handle, |
171 | gfp_t flags, | 171 | gfp_t flags, |
172 | struct dma_attrs *attrs) | 172 | unsigned long attrs) |
173 | { | 173 | { |
174 | void *vaddr; | 174 | void *vaddr; |
175 | 175 | ||
diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c index e88b4176260f..b814ca675131 100644 --- a/arch/x86/pci/vmd.c +++ b/arch/x86/pci/vmd.c | |||
@@ -274,14 +274,14 @@ static struct dma_map_ops *vmd_dma_ops(struct device *dev) | |||
274 | } | 274 | } |
275 | 275 | ||
276 | static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, | 276 | static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, |
277 | gfp_t flag, struct dma_attrs *attrs) | 277 | gfp_t flag, unsigned long attrs) |
278 | { | 278 | { |
279 | return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, | 279 | return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, |
280 | attrs); | 280 | attrs); |
281 | } | 281 | } |
282 | 282 | ||
283 | static void vmd_free(struct device *dev, size_t size, void *vaddr, | 283 | static void vmd_free(struct device *dev, size_t size, void *vaddr, |
284 | dma_addr_t addr, struct dma_attrs *attrs) | 284 | dma_addr_t addr, unsigned long attrs) |
285 | { | 285 | { |
286 | return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, | 286 | return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, |
287 | attrs); | 287 | attrs); |
@@ -289,7 +289,7 @@ static void vmd_free(struct device *dev, size_t size, void *vaddr, | |||
289 | 289 | ||
290 | static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, | 290 | static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, |
291 | void *cpu_addr, dma_addr_t addr, size_t size, | 291 | void *cpu_addr, dma_addr_t addr, size_t size, |
292 | struct dma_attrs *attrs) | 292 | unsigned long attrs) |
293 | { | 293 | { |
294 | return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, | 294 | return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, |
295 | size, attrs); | 295 | size, attrs); |
@@ -297,7 +297,7 @@ static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, | |||
297 | 297 | ||
298 | static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, | 298 | static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, |
299 | void *cpu_addr, dma_addr_t addr, size_t size, | 299 | void *cpu_addr, dma_addr_t addr, size_t size, |
300 | struct dma_attrs *attrs) | 300 | unsigned long attrs) |
301 | { | 301 | { |
302 | return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, | 302 | return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, |
303 | addr, size, attrs); | 303 | addr, size, attrs); |
@@ -306,26 +306,26 @@ static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, | |||
306 | static dma_addr_t vmd_map_page(struct device *dev, struct page *page, | 306 | static dma_addr_t vmd_map_page(struct device *dev, struct page *page, |
307 | unsigned long offset, size_t size, | 307 | unsigned long offset, size_t size, |
308 | enum dma_data_direction dir, | 308 | enum dma_data_direction dir, |
309 | struct dma_attrs *attrs) | 309 | unsigned long attrs) |
310 | { | 310 | { |
311 | return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, | 311 | return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, |
312 | dir, attrs); | 312 | dir, attrs); |
313 | } | 313 | } |
314 | 314 | ||
315 | static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, | 315 | static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, |
316 | enum dma_data_direction dir, struct dma_attrs *attrs) | 316 | enum dma_data_direction dir, unsigned long attrs) |
317 | { | 317 | { |
318 | vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); | 318 | vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); |
319 | } | 319 | } |
320 | 320 | ||
321 | static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 321 | static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
322 | enum dma_data_direction dir, struct dma_attrs *attrs) | 322 | enum dma_data_direction dir, unsigned long attrs) |
323 | { | 323 | { |
324 | return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); | 324 | return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); |
325 | } | 325 | } |
326 | 326 | ||
327 | static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 327 | static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
328 | enum dma_data_direction dir, struct dma_attrs *attrs) | 328 | enum dma_data_direction dir, unsigned long attrs) |
329 | { | 329 | { |
330 | vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); | 330 | vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); |
331 | } | 331 | } |
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index cd66698348ca..1e68806d6695 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c | |||
@@ -142,7 +142,7 @@ static void xtensa_sync_sg_for_device(struct device *dev, | |||
142 | 142 | ||
143 | static void *xtensa_dma_alloc(struct device *dev, size_t size, | 143 | static void *xtensa_dma_alloc(struct device *dev, size_t size, |
144 | dma_addr_t *handle, gfp_t flag, | 144 | dma_addr_t *handle, gfp_t flag, |
145 | struct dma_attrs *attrs) | 145 | unsigned long attrs) |
146 | { | 146 | { |
147 | unsigned long ret; | 147 | unsigned long ret; |
148 | unsigned long uncached = 0; | 148 | unsigned long uncached = 0; |
@@ -171,7 +171,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size, | |||
171 | } | 171 | } |
172 | 172 | ||
173 | static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, | 173 | static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, |
174 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 174 | dma_addr_t dma_handle, unsigned long attrs) |
175 | { | 175 | { |
176 | unsigned long addr = (unsigned long)vaddr + | 176 | unsigned long addr = (unsigned long)vaddr + |
177 | XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; | 177 | XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; |
@@ -185,7 +185,7 @@ static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, | |||
185 | static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, | 185 | static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, |
186 | unsigned long offset, size_t size, | 186 | unsigned long offset, size_t size, |
187 | enum dma_data_direction dir, | 187 | enum dma_data_direction dir, |
188 | struct dma_attrs *attrs) | 188 | unsigned long attrs) |
189 | { | 189 | { |
190 | dma_addr_t dma_handle = page_to_phys(page) + offset; | 190 | dma_addr_t dma_handle = page_to_phys(page) + offset; |
191 | 191 | ||
@@ -195,14 +195,14 @@ static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, | |||
195 | 195 | ||
196 | static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle, | 196 | static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle, |
197 | size_t size, enum dma_data_direction dir, | 197 | size_t size, enum dma_data_direction dir, |
198 | struct dma_attrs *attrs) | 198 | unsigned long attrs) |
199 | { | 199 | { |
200 | xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); | 200 | xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); |
201 | } | 201 | } |
202 | 202 | ||
203 | static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, | 203 | static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, |
204 | int nents, enum dma_data_direction dir, | 204 | int nents, enum dma_data_direction dir, |
205 | struct dma_attrs *attrs) | 205 | unsigned long attrs) |
206 | { | 206 | { |
207 | struct scatterlist *s; | 207 | struct scatterlist *s; |
208 | int i; | 208 | int i; |
@@ -217,7 +217,7 @@ static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, | |||
217 | static void xtensa_unmap_sg(struct device *dev, | 217 | static void xtensa_unmap_sg(struct device *dev, |
218 | struct scatterlist *sg, int nents, | 218 | struct scatterlist *sg, int nents, |
219 | enum dma_data_direction dir, | 219 | enum dma_data_direction dir, |
220 | struct dma_attrs *attrs) | 220 | unsigned long attrs) |
221 | { | 221 | { |
222 | struct scatterlist *s; | 222 | struct scatterlist *s; |
223 | int i; | 223 | int i; |