author     Linus Torvalds <torvalds@linux-foundation.org>  2017-07-06 22:20:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-06 22:20:54 -0400
commit     f72e24a1240b78f421649c4d88f5c24ab1c896a1 (patch)
tree       90bed3bf33ae0abf5636dafcc3eda3cc354612b0 /drivers
parent     2c669275dc3245e2866a0eea15bda8ec8d1ab8db (diff)
parent     1655cf8829d82d367d8fdb5cb58e5885d7d2a391 (diff)
Merge tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping infrastructure from Christoph Hellwig:
"This is the first pull request for the new dma-mapping subsystem
In this new subsystem we'll try to properly maintain all the generic
code related to dma-mapping, and will further consolidate arch code
into common helpers.
This pull request contains:
- removal of the DMA_ERROR_CODE macro, replacing it with calls to
->mapping_error so that the dma_map_ops instances are more self
contained and can be shared across architectures (me)
- removal of the ->set_dma_mask method, which duplicates the
->dma_capable one in terms of functionality, but requires more
duplicate code.
- various updates for the coherent dma pool and related arm code
(Vladimir)
- various smaller cleanups (me)"
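The DMA_ERROR_CODE removal in the first item is the change most visible to drivers: once a backend stops exporting that constant, the only portable way to validate a handle returned by dma_map_single()/dma_map_page() is dma_mapping_error(), which dispatches to the ops' ->mapping_error callback. Several hunks below (tegra/ivc.c, qat_algs.c, ibmveth.c) are exactly this conversion. A minimal sketch of the pattern, with a made-up helper name and buffer:

    #include <linux/dma-mapping.h>

    /* Illustrative helper: map a CPU buffer for device reads and check the
     * result the only way still supported after this series. */
    static int example_map_buf(struct device *dev, void *buf, size_t len,
                               dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *handle))    /* no DMA_ERROR_CODE compare */
                    return -ENOMEM;
            return 0;
    }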
* tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping: (56 commits)
ARM: dma-mapping: Remove traces of NOMMU code
ARM: NOMMU: Set ARM_DMA_MEM_BUFFERABLE for M-class cpus
ARM: NOMMU: Introduce dma operations for noMMU
drivers: dma-mapping: allow dma_common_mmap() for NOMMU
drivers: dma-coherent: Introduce default DMA pool
drivers: dma-coherent: Account dma_pfn_offset when used with device tree
dma: Take into account dma_pfn_offset
dma-mapping: replace dmam_alloc_noncoherent with dmam_alloc_attrs
dma-mapping: remove dmam_free_noncoherent
crypto: qat - avoid an uninitialized variable warning
au1100fb: remove a bogus dma_free_nonconsistent call
MAINTAINERS: add entry for dma mapping helpers
powerpc: merge __dma_set_mask into dma_set_mask
dma-mapping: remove the set_dma_mask method
powerpc/cell: use the dma_supported method for ops switching
powerpc/cell: clean up fixed mapping dma_ops initialization
tile: remove dma_supported and mapping_error methods
xen-swiotlb: remove xen_swiotlb_set_dma_mask
arm: implement ->dma_supported instead of ->set_dma_mask
mips/loongson64: implement ->dma_supported instead of ->set_dma_mask
...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/dma-coherent.c               |  74
-rw-r--r--  drivers/base/dma-mapping.c                |  60
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c  |  40
-rw-r--r--  drivers/dma/ioat/init.c                   |  24
-rw-r--r--  drivers/firmware/tegra/ivc.c              |   4
-rw-r--r--  drivers/gpu/drm/armada/armada_fb.c        |   2
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c       |   5
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.h       |   1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c    |   4
-rw-r--r--  drivers/iommu/amd_iommu.c                 |  20
-rw-r--r--  drivers/iommu/dma-iommu.c                 |  18
-rw-r--r--  drivers/iommu/intel-iommu.c               |   3
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c        | 159
-rw-r--r--  drivers/video/fbdev/au1100fb.c            |   4
-rw-r--r--  drivers/video/fbdev/au1200fb.c            |   5
-rw-r--r--  drivers/xen/swiotlb-xen.c                 | 113
16 files changed, 280 insertions, 256 deletions
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 640a7e63c453..2ae24c28e70c 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -16,8 +16,27 @@ struct dma_coherent_mem { | |||
16 | int flags; | 16 | int flags; |
17 | unsigned long *bitmap; | 17 | unsigned long *bitmap; |
18 | spinlock_t spinlock; | 18 | spinlock_t spinlock; |
19 | bool use_dev_dma_pfn_offset; | ||
19 | }; | 20 | }; |
20 | 21 | ||
22 | static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init; | ||
23 | |||
24 | static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev) | ||
25 | { | ||
26 | if (dev && dev->dma_mem) | ||
27 | return dev->dma_mem; | ||
28 | return dma_coherent_default_memory; | ||
29 | } | ||
30 | |||
31 | static inline dma_addr_t dma_get_device_base(struct device *dev, | ||
32 | struct dma_coherent_mem * mem) | ||
33 | { | ||
34 | if (mem->use_dev_dma_pfn_offset) | ||
35 | return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT; | ||
36 | else | ||
37 | return mem->device_base; | ||
38 | } | ||
39 | |||
21 | static bool dma_init_coherent_memory( | 40 | static bool dma_init_coherent_memory( |
22 | phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, | 41 | phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, |
23 | struct dma_coherent_mem **mem) | 42 | struct dma_coherent_mem **mem) |
@@ -83,6 +102,9 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem) | |||
83 | static int dma_assign_coherent_memory(struct device *dev, | 102 | static int dma_assign_coherent_memory(struct device *dev, |
84 | struct dma_coherent_mem *mem) | 103 | struct dma_coherent_mem *mem) |
85 | { | 104 | { |
105 | if (!dev) | ||
106 | return -ENODEV; | ||
107 | |||
86 | if (dev->dma_mem) | 108 | if (dev->dma_mem) |
87 | return -EBUSY; | 109 | return -EBUSY; |
88 | 110 | ||
@@ -133,7 +155,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev, | |||
133 | return ERR_PTR(-EINVAL); | 155 | return ERR_PTR(-EINVAL); |
134 | 156 | ||
135 | spin_lock_irqsave(&mem->spinlock, flags); | 157 | spin_lock_irqsave(&mem->spinlock, flags); |
136 | pos = (device_addr - mem->device_base) >> PAGE_SHIFT; | 158 | pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem)); |
137 | err = bitmap_allocate_region(mem->bitmap, pos, get_order(size)); | 159 | err = bitmap_allocate_region(mem->bitmap, pos, get_order(size)); |
138 | spin_unlock_irqrestore(&mem->spinlock, flags); | 160 | spin_unlock_irqrestore(&mem->spinlock, flags); |
139 | 161 | ||
@@ -161,15 +183,12 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied); | |||
161 | int dma_alloc_from_coherent(struct device *dev, ssize_t size, | 183 | int dma_alloc_from_coherent(struct device *dev, ssize_t size, |
162 | dma_addr_t *dma_handle, void **ret) | 184 | dma_addr_t *dma_handle, void **ret) |
163 | { | 185 | { |
164 | struct dma_coherent_mem *mem; | 186 | struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); |
165 | int order = get_order(size); | 187 | int order = get_order(size); |
166 | unsigned long flags; | 188 | unsigned long flags; |
167 | int pageno; | 189 | int pageno; |
168 | int dma_memory_map; | 190 | int dma_memory_map; |
169 | 191 | ||
170 | if (!dev) | ||
171 | return 0; | ||
172 | mem = dev->dma_mem; | ||
173 | if (!mem) | 192 | if (!mem) |
174 | return 0; | 193 | return 0; |
175 | 194 | ||
@@ -186,7 +205,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, | |||
186 | /* | 205 | /* |
187 | * Memory was found in the per-device area. | 206 | * Memory was found in the per-device area. |
188 | */ | 207 | */ |
189 | *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); | 208 | *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); |
190 | *ret = mem->virt_base + (pageno << PAGE_SHIFT); | 209 | *ret = mem->virt_base + (pageno << PAGE_SHIFT); |
191 | dma_memory_map = (mem->flags & DMA_MEMORY_MAP); | 210 | dma_memory_map = (mem->flags & DMA_MEMORY_MAP); |
192 | spin_unlock_irqrestore(&mem->spinlock, flags); | 211 | spin_unlock_irqrestore(&mem->spinlock, flags); |
@@ -223,7 +242,7 @@ EXPORT_SYMBOL(dma_alloc_from_coherent); | |||
223 | */ | 242 | */ |
224 | int dma_release_from_coherent(struct device *dev, int order, void *vaddr) | 243 | int dma_release_from_coherent(struct device *dev, int order, void *vaddr) |
225 | { | 244 | { |
226 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | 245 | struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); |
227 | 246 | ||
228 | if (mem && vaddr >= mem->virt_base && vaddr < | 247 | if (mem && vaddr >= mem->virt_base && vaddr < |
229 | (mem->virt_base + (mem->size << PAGE_SHIFT))) { | 248 | (mem->virt_base + (mem->size << PAGE_SHIFT))) { |
@@ -257,7 +276,7 @@ EXPORT_SYMBOL(dma_release_from_coherent); | |||
257 | int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, | 276 | int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, |
258 | void *vaddr, size_t size, int *ret) | 277 | void *vaddr, size_t size, int *ret) |
259 | { | 278 | { |
260 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | 279 | struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); |
261 | 280 | ||
262 | if (mem && vaddr >= mem->virt_base && vaddr + size <= | 281 | if (mem && vaddr >= mem->virt_base && vaddr + size <= |
263 | (mem->virt_base + (mem->size << PAGE_SHIFT))) { | 282 | (mem->virt_base + (mem->size << PAGE_SHIFT))) { |
@@ -287,6 +306,8 @@ EXPORT_SYMBOL(dma_mmap_from_coherent); | |||
287 | #include <linux/of_fdt.h> | 306 | #include <linux/of_fdt.h> |
288 | #include <linux/of_reserved_mem.h> | 307 | #include <linux/of_reserved_mem.h> |
289 | 308 | ||
309 | static struct reserved_mem *dma_reserved_default_memory __initdata; | ||
310 | |||
290 | static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) | 311 | static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) |
291 | { | 312 | { |
292 | struct dma_coherent_mem *mem = rmem->priv; | 313 | struct dma_coherent_mem *mem = rmem->priv; |
@@ -299,6 +320,7 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) | |||
299 | &rmem->base, (unsigned long)rmem->size / SZ_1M); | 320 | &rmem->base, (unsigned long)rmem->size / SZ_1M); |
300 | return -ENODEV; | 321 | return -ENODEV; |
301 | } | 322 | } |
323 | mem->use_dev_dma_pfn_offset = true; | ||
302 | rmem->priv = mem; | 324 | rmem->priv = mem; |
303 | dma_assign_coherent_memory(dev, mem); | 325 | dma_assign_coherent_memory(dev, mem); |
304 | return 0; | 326 | return 0; |
@@ -307,7 +329,8 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) | |||
307 | static void rmem_dma_device_release(struct reserved_mem *rmem, | 329 | static void rmem_dma_device_release(struct reserved_mem *rmem, |
308 | struct device *dev) | 330 | struct device *dev) |
309 | { | 331 | { |
310 | dev->dma_mem = NULL; | 332 | if (dev) |
333 | dev->dma_mem = NULL; | ||
311 | } | 334 | } |
312 | 335 | ||
313 | static const struct reserved_mem_ops rmem_dma_ops = { | 336 | static const struct reserved_mem_ops rmem_dma_ops = { |
@@ -327,6 +350,12 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem) | |||
327 | pr_err("Reserved memory: regions without no-map are not yet supported\n"); | 350 | pr_err("Reserved memory: regions without no-map are not yet supported\n"); |
328 | return -EINVAL; | 351 | return -EINVAL; |
329 | } | 352 | } |
353 | |||
354 | if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) { | ||
355 | WARN(dma_reserved_default_memory, | ||
356 | "Reserved memory: region for default DMA coherent area is redefined\n"); | ||
357 | dma_reserved_default_memory = rmem; | ||
358 | } | ||
330 | #endif | 359 | #endif |
331 | 360 | ||
332 | rmem->ops = &rmem_dma_ops; | 361 | rmem->ops = &rmem_dma_ops; |
@@ -334,5 +363,32 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem) | |||
334 | &rmem->base, (unsigned long)rmem->size / SZ_1M); | 363 | &rmem->base, (unsigned long)rmem->size / SZ_1M); |
335 | return 0; | 364 | return 0; |
336 | } | 365 | } |
366 | |||
367 | static int __init dma_init_reserved_memory(void) | ||
368 | { | ||
369 | const struct reserved_mem_ops *ops; | ||
370 | int ret; | ||
371 | |||
372 | if (!dma_reserved_default_memory) | ||
373 | return -ENOMEM; | ||
374 | |||
375 | ops = dma_reserved_default_memory->ops; | ||
376 | |||
377 | /* | ||
378 | * We rely on rmem_dma_device_init() does not propagate error of | ||
379 | * dma_assign_coherent_memory() for "NULL" device. | ||
380 | */ | ||
381 | ret = ops->device_init(dma_reserved_default_memory, NULL); | ||
382 | |||
383 | if (!ret) { | ||
384 | dma_coherent_default_memory = dma_reserved_default_memory->priv; | ||
385 | pr_info("DMA: default coherent area is set\n"); | ||
386 | } | ||
387 | |||
388 | return ret; | ||
389 | } | ||
390 | |||
391 | core_initcall(dma_init_reserved_memory); | ||
392 | |||
337 | RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup); | 393 | RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup); |
338 | #endif | 394 | #endif |
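Taken together, the dma-coherent.c hunks above make dma_alloc_from_coherent() fall back to a global dma_coherent_default_memory pool when a device has no dma_mem of its own; that pool is wired up at core_initcall time from a reserved-memory region flagged with the new "linux,dma-default" property. Driver code does not change: an ordinary coherent allocation may now simply be satisfied from the default pool. A hedged sketch with placeholder names and size:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/sizes.h>

    /* Whether the memory comes from a per-device pool, the new default pool,
     * or the regular allocator is decided inside dma_alloc_from_coherent()
     * and the arch dma_map_ops; the call site stays the same. */
    static void *example_alloc_ring(struct device *dev, dma_addr_t *handle)
    {
            return dma_alloc_coherent(dev, SZ_4K, handle, GFP_KERNEL);
    }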
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 9dbef4d1baa4..5096755d185e 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -22,20 +22,15 @@ struct dma_devres { | |||
22 | size_t size; | 22 | size_t size; |
23 | void *vaddr; | 23 | void *vaddr; |
24 | dma_addr_t dma_handle; | 24 | dma_addr_t dma_handle; |
25 | unsigned long attrs; | ||
25 | }; | 26 | }; |
26 | 27 | ||
27 | static void dmam_coherent_release(struct device *dev, void *res) | 28 | static void dmam_release(struct device *dev, void *res) |
28 | { | 29 | { |
29 | struct dma_devres *this = res; | 30 | struct dma_devres *this = res; |
30 | 31 | ||
31 | dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle); | 32 | dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, |
32 | } | 33 | this->attrs); |
33 | |||
34 | static void dmam_noncoherent_release(struct device *dev, void *res) | ||
35 | { | ||
36 | struct dma_devres *this = res; | ||
37 | |||
38 | dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle); | ||
39 | } | 34 | } |
40 | 35 | ||
41 | static int dmam_match(struct device *dev, void *res, void *match_data) | 36 | static int dmam_match(struct device *dev, void *res, void *match_data) |
@@ -69,7 +64,7 @@ void *dmam_alloc_coherent(struct device *dev, size_t size, | |||
69 | struct dma_devres *dr; | 64 | struct dma_devres *dr; |
70 | void *vaddr; | 65 | void *vaddr; |
71 | 66 | ||
72 | dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp); | 67 | dr = devres_alloc(dmam_release, sizeof(*dr), gfp); |
73 | if (!dr) | 68 | if (!dr) |
74 | return NULL; | 69 | return NULL; |
75 | 70 | ||
@@ -104,35 +99,35 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
104 | struct dma_devres match_data = { size, vaddr, dma_handle }; | 99 | struct dma_devres match_data = { size, vaddr, dma_handle }; |
105 | 100 | ||
106 | dma_free_coherent(dev, size, vaddr, dma_handle); | 101 | dma_free_coherent(dev, size, vaddr, dma_handle); |
107 | WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match, | 102 | WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data)); |
108 | &match_data)); | ||
109 | } | 103 | } |
110 | EXPORT_SYMBOL(dmam_free_coherent); | 104 | EXPORT_SYMBOL(dmam_free_coherent); |
111 | 105 | ||
112 | /** | 106 | /** |
113 | * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent() | 107 | * dmam_alloc_attrs - Managed dma_alloc_attrs() |
114 | * @dev: Device to allocate non_coherent memory for | 108 | * @dev: Device to allocate non_coherent memory for |
115 | * @size: Size of allocation | 109 | * @size: Size of allocation |
116 | * @dma_handle: Out argument for allocated DMA handle | 110 | * @dma_handle: Out argument for allocated DMA handle |
117 | * @gfp: Allocation flags | 111 | * @gfp: Allocation flags |
112 | * @attrs: Flags in the DMA_ATTR_* namespace. | ||
118 | * | 113 | * |
119 | * Managed dma_alloc_noncoherent(). Memory allocated using this | 114 | * Managed dma_alloc_attrs(). Memory allocated using this function will be |
120 | * function will be automatically released on driver detach. | 115 | * automatically released on driver detach. |
121 | * | 116 | * |
122 | * RETURNS: | 117 | * RETURNS: |
123 | * Pointer to allocated memory on success, NULL on failure. | 118 | * Pointer to allocated memory on success, NULL on failure. |
124 | */ | 119 | */ |
125 | void *dmam_alloc_noncoherent(struct device *dev, size_t size, | 120 | void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, |
126 | dma_addr_t *dma_handle, gfp_t gfp) | 121 | gfp_t gfp, unsigned long attrs) |
127 | { | 122 | { |
128 | struct dma_devres *dr; | 123 | struct dma_devres *dr; |
129 | void *vaddr; | 124 | void *vaddr; |
130 | 125 | ||
131 | dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp); | 126 | dr = devres_alloc(dmam_release, sizeof(*dr), gfp); |
132 | if (!dr) | 127 | if (!dr) |
133 | return NULL; | 128 | return NULL; |
134 | 129 | ||
135 | vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp); | 130 | vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); |
136 | if (!vaddr) { | 131 | if (!vaddr) { |
137 | devres_free(dr); | 132 | devres_free(dr); |
138 | return NULL; | 133 | return NULL; |
@@ -141,32 +136,13 @@ void *dmam_alloc_noncoherent(struct device *dev, size_t size, | |||
141 | dr->vaddr = vaddr; | 136 | dr->vaddr = vaddr; |
142 | dr->dma_handle = *dma_handle; | 137 | dr->dma_handle = *dma_handle; |
143 | dr->size = size; | 138 | dr->size = size; |
139 | dr->attrs = attrs; | ||
144 | 140 | ||
145 | devres_add(dev, dr); | 141 | devres_add(dev, dr); |
146 | 142 | ||
147 | return vaddr; | 143 | return vaddr; |
148 | } | 144 | } |
149 | EXPORT_SYMBOL(dmam_alloc_noncoherent); | 145 | EXPORT_SYMBOL(dmam_alloc_attrs); |
150 | |||
151 | /** | ||
152 | * dmam_free_coherent - Managed dma_free_noncoherent() | ||
153 | * @dev: Device to free noncoherent memory for | ||
154 | * @size: Size of allocation | ||
155 | * @vaddr: Virtual address of the memory to free | ||
156 | * @dma_handle: DMA handle of the memory to free | ||
157 | * | ||
158 | * Managed dma_free_noncoherent(). | ||
159 | */ | ||
160 | void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, | ||
161 | dma_addr_t dma_handle) | ||
162 | { | ||
163 | struct dma_devres match_data = { size, vaddr, dma_handle }; | ||
164 | |||
165 | dma_free_noncoherent(dev, size, vaddr, dma_handle); | ||
166 | WARN_ON(!devres_destroy(dev, dmam_noncoherent_release, dmam_match, | ||
167 | &match_data)); | ||
168 | } | ||
169 | EXPORT_SYMBOL(dmam_free_noncoherent); | ||
170 | 146 | ||
171 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT | 147 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT |
172 | 148 | ||
@@ -251,7 +227,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | |||
251 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | 227 | void *cpu_addr, dma_addr_t dma_addr, size_t size) |
252 | { | 228 | { |
253 | int ret = -ENXIO; | 229 | int ret = -ENXIO; |
254 | #if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) | 230 | #ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP |
255 | unsigned long user_count = vma_pages(vma); | 231 | unsigned long user_count = vma_pages(vma); |
256 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 232 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
257 | unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); | 233 | unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); |
@@ -268,7 +244,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | |||
268 | user_count << PAGE_SHIFT, | 244 | user_count << PAGE_SHIFT, |
269 | vma->vm_page_prot); | 245 | vma->vm_page_prot); |
270 | } | 246 | } |
271 | #endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ | 247 | #endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ |
272 | 248 | ||
273 | return ret; | 249 | return ret; |
274 | } | 250 | } |
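For users of the managed (devres) API, the dma-mapping.c changes above boil down to this: dmam_alloc_noncoherent() and dmam_free_noncoherent() are gone, and non-consistent memory is obtained through dmam_alloc_attrs() with DMA_ATTR_NON_CONSISTENT, keeping the same automatic release on driver detach; the au1200fb hunk further down is the in-tree conversion. A sketch of the replacement call (names are placeholders):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* Before: dmam_alloc_noncoherent(dev, len, phys, GFP_KERNEL);
     * After:  same managed lifetime, expressed through the attrs API. */
    static void *example_alloc_noncoherent(struct device *dev, size_t len,
                                           dma_addr_t *phys)
    {
            return dmam_alloc_attrs(dev, len, phys, GFP_KERNEL,
                                    DMA_ATTR_NON_CONSISTENT);
    }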
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 5b5efcc52cb5..baffae817259 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -686,7 +686,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
686 | 686 | ||
687 | blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); | 687 | blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); |
688 | if (unlikely(dma_mapping_error(dev, blp))) | 688 | if (unlikely(dma_mapping_error(dev, blp))) |
689 | goto err; | 689 | goto err_in; |
690 | 690 | ||
691 | for_each_sg(sgl, sg, n, i) { | 691 | for_each_sg(sgl, sg, n, i) { |
692 | int y = sg_nctr; | 692 | int y = sg_nctr; |
@@ -699,7 +699,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
699 | DMA_BIDIRECTIONAL); | 699 | DMA_BIDIRECTIONAL); |
700 | bufl->bufers[y].len = sg->length; | 700 | bufl->bufers[y].len = sg->length; |
701 | if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) | 701 | if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) |
702 | goto err; | 702 | goto err_in; |
703 | sg_nctr++; | 703 | sg_nctr++; |
704 | } | 704 | } |
705 | bufl->num_bufs = sg_nctr; | 705 | bufl->num_bufs = sg_nctr; |
@@ -717,10 +717,10 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
717 | buflout = kzalloc_node(sz_out, GFP_ATOMIC, | 717 | buflout = kzalloc_node(sz_out, GFP_ATOMIC, |
718 | dev_to_node(&GET_DEV(inst->accel_dev))); | 718 | dev_to_node(&GET_DEV(inst->accel_dev))); |
719 | if (unlikely(!buflout)) | 719 | if (unlikely(!buflout)) |
720 | goto err; | 720 | goto err_in; |
721 | bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); | 721 | bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); |
722 | if (unlikely(dma_mapping_error(dev, bloutp))) | 722 | if (unlikely(dma_mapping_error(dev, bloutp))) |
723 | goto err; | 723 | goto err_out; |
724 | bufers = buflout->bufers; | 724 | bufers = buflout->bufers; |
725 | for_each_sg(sglout, sg, n, i) { | 725 | for_each_sg(sglout, sg, n, i) { |
726 | int y = sg_nctr; | 726 | int y = sg_nctr; |
@@ -732,7 +732,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
732 | sg->length, | 732 | sg->length, |
733 | DMA_BIDIRECTIONAL); | 733 | DMA_BIDIRECTIONAL); |
734 | if (unlikely(dma_mapping_error(dev, bufers[y].addr))) | 734 | if (unlikely(dma_mapping_error(dev, bufers[y].addr))) |
735 | goto err; | 735 | goto err_out; |
736 | bufers[y].len = sg->length; | 736 | bufers[y].len = sg->length; |
737 | sg_nctr++; | 737 | sg_nctr++; |
738 | } | 738 | } |
@@ -747,9 +747,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
747 | qat_req->buf.sz_out = 0; | 747 | qat_req->buf.sz_out = 0; |
748 | } | 748 | } |
749 | return 0; | 749 | return 0; |
750 | err: | 750 | |
751 | dev_err(dev, "Failed to map buf for dma\n"); | 751 | err_out: |
752 | sg_nctr = 0; | 752 | n = sg_nents(sglout); |
753 | for (i = 0; i < n; i++) | ||
754 | if (!dma_mapping_error(dev, buflout->bufers[i].addr)) | ||
755 | dma_unmap_single(dev, buflout->bufers[i].addr, | ||
756 | buflout->bufers[i].len, | ||
757 | DMA_BIDIRECTIONAL); | ||
758 | if (!dma_mapping_error(dev, bloutp)) | ||
759 | dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); | ||
760 | kfree(buflout); | ||
761 | |||
762 | err_in: | ||
763 | n = sg_nents(sgl); | ||
753 | for (i = 0; i < n; i++) | 764 | for (i = 0; i < n; i++) |
754 | if (!dma_mapping_error(dev, bufl->bufers[i].addr)) | 765 | if (!dma_mapping_error(dev, bufl->bufers[i].addr)) |
755 | dma_unmap_single(dev, bufl->bufers[i].addr, | 766 | dma_unmap_single(dev, bufl->bufers[i].addr, |
@@ -759,17 +770,8 @@ err: | |||
759 | if (!dma_mapping_error(dev, blp)) | 770 | if (!dma_mapping_error(dev, blp)) |
760 | dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); | 771 | dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); |
761 | kfree(bufl); | 772 | kfree(bufl); |
762 | if (sgl != sglout && buflout) { | 773 | |
763 | n = sg_nents(sglout); | 774 | dev_err(dev, "Failed to map buf for dma\n"); |
764 | for (i = 0; i < n; i++) | ||
765 | if (!dma_mapping_error(dev, buflout->bufers[i].addr)) | ||
766 | dma_unmap_single(dev, buflout->bufers[i].addr, | ||
767 | buflout->bufers[i].len, | ||
768 | DMA_BIDIRECTIONAL); | ||
769 | if (!dma_mapping_error(dev, bloutp)) | ||
770 | dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); | ||
771 | kfree(buflout); | ||
772 | } | ||
773 | return -ENOMEM; | 775 | return -ENOMEM; |
774 | } | 776 | } |
775 | 777 | ||
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 6ad4384b3fa8..ed8ed1192775 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -839,8 +839,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | |||
839 | goto free_resources; | 839 | goto free_resources; |
840 | } | 840 | } |
841 | 841 | ||
842 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
843 | dma_srcs[i] = DMA_ERROR_CODE; | ||
844 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { | 842 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { |
845 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, | 843 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, |
846 | DMA_TO_DEVICE); | 844 | DMA_TO_DEVICE); |
@@ -910,8 +908,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | |||
910 | 908 | ||
911 | xor_val_result = 1; | 909 | xor_val_result = 1; |
912 | 910 | ||
913 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
914 | dma_srcs[i] = DMA_ERROR_CODE; | ||
915 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { | 911 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { |
916 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | 912 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, |
917 | DMA_TO_DEVICE); | 913 | DMA_TO_DEVICE); |
@@ -965,8 +961,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | |||
965 | op = IOAT_OP_XOR_VAL; | 961 | op = IOAT_OP_XOR_VAL; |
966 | 962 | ||
967 | xor_val_result = 0; | 963 | xor_val_result = 0; |
968 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
969 | dma_srcs[i] = DMA_ERROR_CODE; | ||
970 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { | 964 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { |
971 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | 965 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, |
972 | DMA_TO_DEVICE); | 966 | DMA_TO_DEVICE); |
@@ -1017,18 +1011,14 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | |||
1017 | goto free_resources; | 1011 | goto free_resources; |
1018 | dma_unmap: | 1012 | dma_unmap: |
1019 | if (op == IOAT_OP_XOR) { | 1013 | if (op == IOAT_OP_XOR) { |
1020 | if (dest_dma != DMA_ERROR_CODE) | 1014 | while (--i >= 0) |
1021 | dma_unmap_page(dev, dest_dma, PAGE_SIZE, | 1015 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, |
1022 | DMA_FROM_DEVICE); | 1016 | DMA_TO_DEVICE); |
1023 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | 1017 | dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); |
1024 | if (dma_srcs[i] != DMA_ERROR_CODE) | ||
1025 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | ||
1026 | DMA_TO_DEVICE); | ||
1027 | } else if (op == IOAT_OP_XOR_VAL) { | 1018 | } else if (op == IOAT_OP_XOR_VAL) { |
1028 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | 1019 | while (--i >= 0) |
1029 | if (dma_srcs[i] != DMA_ERROR_CODE) | 1020 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, |
1030 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | 1021 | DMA_TO_DEVICE); |
1031 | DMA_TO_DEVICE); | ||
1032 | } | 1022 | } |
1033 | free_resources: | 1023 | free_resources: |
1034 | dma->device_free_chan_resources(dma_chan); | 1024 | dma->device_free_chan_resources(dma_chan); |
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
index 29ecfd815320..a01461d63f68 100644
--- a/drivers/firmware/tegra/ivc.c
+++ b/drivers/firmware/tegra/ivc.c
@@ -646,12 +646,12 @@ int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx, | |||
646 | if (peer) { | 646 | if (peer) { |
647 | ivc->rx.phys = dma_map_single(peer, rx, queue_size, | 647 | ivc->rx.phys = dma_map_single(peer, rx, queue_size, |
648 | DMA_BIDIRECTIONAL); | 648 | DMA_BIDIRECTIONAL); |
649 | if (ivc->rx.phys == DMA_ERROR_CODE) | 649 | if (dma_mapping_error(peer, ivc->rx.phys)) |
650 | return -ENOMEM; | 650 | return -ENOMEM; |
651 | 651 | ||
652 | ivc->tx.phys = dma_map_single(peer, tx, queue_size, | 652 | ivc->tx.phys = dma_map_single(peer, tx, queue_size, |
653 | DMA_BIDIRECTIONAL); | 653 | DMA_BIDIRECTIONAL); |
654 | if (ivc->tx.phys == DMA_ERROR_CODE) { | 654 | if (dma_mapping_error(peer, ivc->tx.phys)) { |
655 | dma_unmap_single(peer, ivc->rx.phys, queue_size, | 655 | dma_unmap_single(peer, ivc->rx.phys, queue_size, |
656 | DMA_BIDIRECTIONAL); | 656 | DMA_BIDIRECTIONAL); |
657 | return -ENOMEM; | 657 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 2a7eb6817c36..92e6b08ea64a 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -133,7 +133,7 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev, | |||
133 | } | 133 | } |
134 | 134 | ||
135 | /* Framebuffer objects must have a valid device address for scanout */ | 135 | /* Framebuffer objects must have a valid device address for scanout */ |
136 | if (obj->dev_addr == DMA_ERROR_CODE) { | 136 | if (!obj->mapped) { |
137 | ret = -EINVAL; | 137 | ret = -EINVAL; |
138 | goto err_unref; | 138 | goto err_unref; |
139 | } | 139 | } |
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index d6c2a5d190eb..a76ca21d063b 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -175,6 +175,7 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) | |||
175 | 175 | ||
176 | obj->phys_addr = obj->linear->start; | 176 | obj->phys_addr = obj->linear->start; |
177 | obj->dev_addr = obj->linear->start; | 177 | obj->dev_addr = obj->linear->start; |
178 | obj->mapped = true; | ||
178 | } | 179 | } |
179 | 180 | ||
180 | DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj, | 181 | DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj, |
@@ -205,7 +206,6 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size) | |||
205 | return NULL; | 206 | return NULL; |
206 | 207 | ||
207 | drm_gem_private_object_init(dev, &obj->obj, size); | 208 | drm_gem_private_object_init(dev, &obj->obj, size); |
208 | obj->dev_addr = DMA_ERROR_CODE; | ||
209 | 209 | ||
210 | DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size); | 210 | DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size); |
211 | 211 | ||
@@ -229,8 +229,6 @@ static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev, | |||
229 | return NULL; | 229 | return NULL; |
230 | } | 230 | } |
231 | 231 | ||
232 | obj->dev_addr = DMA_ERROR_CODE; | ||
233 | |||
234 | mapping = obj->obj.filp->f_mapping; | 232 | mapping = obj->obj.filp->f_mapping; |
235 | mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); | 233 | mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); |
236 | 234 | ||
@@ -610,5 +608,6 @@ int armada_gem_map_import(struct armada_gem_object *dobj) | |||
610 | return -EINVAL; | 608 | return -EINVAL; |
611 | } | 609 | } |
612 | dobj->dev_addr = sg_dma_address(dobj->sgt->sgl); | 610 | dobj->dev_addr = sg_dma_address(dobj->sgt->sgl); |
611 | dobj->mapped = true; | ||
613 | return 0; | 612 | return 0; |
614 | } | 613 | } |
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index b88d2b9853c7..6e524e0676bb 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -16,6 +16,7 @@ struct armada_gem_object { | |||
16 | void *addr; | 16 | void *addr; |
17 | phys_addr_t phys_addr; | 17 | phys_addr_t phys_addr; |
18 | resource_size_t dev_addr; | 18 | resource_size_t dev_addr; |
19 | bool mapped; | ||
19 | struct drm_mm_node *linear; /* for linear backed */ | 20 | struct drm_mm_node *linear; /* for linear backed */ |
20 | struct page *page; /* for page backed */ | 21 | struct page *page; /* for page backed */ |
21 | struct sg_table *sgt; /* for imported */ | 22 | struct sg_table *sgt; /* for imported */ |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c77a5aced81a..d48fd7c918f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -181,8 +181,8 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) | |||
181 | { | 181 | { |
182 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | 182 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); |
183 | 183 | ||
184 | if (index >= MAX_FB_BUFFER) | 184 | if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) |
185 | return DMA_ERROR_CODE; | 185 | return 0; |
186 | 186 | ||
187 | return exynos_fb->dma_addr[index]; | 187 | return exynos_fb->dma_addr[index]; |
188 | } | 188 | } |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 5c9759ed22ca..f16d0f26ee24 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -54,6 +54,8 @@ | |||
54 | #include "amd_iommu_types.h" | 54 | #include "amd_iommu_types.h" |
55 | #include "irq_remapping.h" | 55 | #include "irq_remapping.h" |
56 | 56 | ||
57 | #define AMD_IOMMU_MAPPING_ERROR 0 | ||
58 | |||
57 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) | 59 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) |
58 | 60 | ||
59 | #define LOOP_TIMEOUT 100000 | 61 | #define LOOP_TIMEOUT 100000 |
@@ -2394,7 +2396,7 @@ static dma_addr_t __map_single(struct device *dev, | |||
2394 | paddr &= PAGE_MASK; | 2396 | paddr &= PAGE_MASK; |
2395 | 2397 | ||
2396 | address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask); | 2398 | address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask); |
2397 | if (address == DMA_ERROR_CODE) | 2399 | if (address == AMD_IOMMU_MAPPING_ERROR) |
2398 | goto out; | 2400 | goto out; |
2399 | 2401 | ||
2400 | prot = dir2prot(direction); | 2402 | prot = dir2prot(direction); |
@@ -2431,7 +2433,7 @@ out_unmap: | |||
2431 | 2433 | ||
2432 | dma_ops_free_iova(dma_dom, address, pages); | 2434 | dma_ops_free_iova(dma_dom, address, pages); |
2433 | 2435 | ||
2434 | return DMA_ERROR_CODE; | 2436 | return AMD_IOMMU_MAPPING_ERROR; |
2435 | } | 2437 | } |
2436 | 2438 | ||
2437 | /* | 2439 | /* |
@@ -2483,7 +2485,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, | |||
2483 | if (PTR_ERR(domain) == -EINVAL) | 2485 | if (PTR_ERR(domain) == -EINVAL) |
2484 | return (dma_addr_t)paddr; | 2486 | return (dma_addr_t)paddr; |
2485 | else if (IS_ERR(domain)) | 2487 | else if (IS_ERR(domain)) |
2486 | return DMA_ERROR_CODE; | 2488 | return AMD_IOMMU_MAPPING_ERROR; |
2487 | 2489 | ||
2488 | dma_mask = *dev->dma_mask; | 2490 | dma_mask = *dev->dma_mask; |
2489 | dma_dom = to_dma_ops_domain(domain); | 2491 | dma_dom = to_dma_ops_domain(domain); |
@@ -2560,7 +2562,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, | |||
2560 | npages = sg_num_pages(dev, sglist, nelems); | 2562 | npages = sg_num_pages(dev, sglist, nelems); |
2561 | 2563 | ||
2562 | address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask); | 2564 | address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask); |
2563 | if (address == DMA_ERROR_CODE) | 2565 | if (address == AMD_IOMMU_MAPPING_ERROR) |
2564 | goto out_err; | 2566 | goto out_err; |
2565 | 2567 | ||
2566 | prot = dir2prot(direction); | 2568 | prot = dir2prot(direction); |
@@ -2683,7 +2685,7 @@ static void *alloc_coherent(struct device *dev, size_t size, | |||
2683 | *dma_addr = __map_single(dev, dma_dom, page_to_phys(page), | 2685 | *dma_addr = __map_single(dev, dma_dom, page_to_phys(page), |
2684 | size, DMA_BIDIRECTIONAL, dma_mask); | 2686 | size, DMA_BIDIRECTIONAL, dma_mask); |
2685 | 2687 | ||
2686 | if (*dma_addr == DMA_ERROR_CODE) | 2688 | if (*dma_addr == AMD_IOMMU_MAPPING_ERROR) |
2687 | goto out_free; | 2689 | goto out_free; |
2688 | 2690 | ||
2689 | return page_address(page); | 2691 | return page_address(page); |
@@ -2729,9 +2731,16 @@ free_mem: | |||
2729 | */ | 2731 | */ |
2730 | static int amd_iommu_dma_supported(struct device *dev, u64 mask) | 2732 | static int amd_iommu_dma_supported(struct device *dev, u64 mask) |
2731 | { | 2733 | { |
2734 | if (!x86_dma_supported(dev, mask)) | ||
2735 | return 0; | ||
2732 | return check_device(dev); | 2736 | return check_device(dev); |
2733 | } | 2737 | } |
2734 | 2738 | ||
2739 | static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
2740 | { | ||
2741 | return dma_addr == AMD_IOMMU_MAPPING_ERROR; | ||
2742 | } | ||
2743 | |||
2735 | static const struct dma_map_ops amd_iommu_dma_ops = { | 2744 | static const struct dma_map_ops amd_iommu_dma_ops = { |
2736 | .alloc = alloc_coherent, | 2745 | .alloc = alloc_coherent, |
2737 | .free = free_coherent, | 2746 | .free = free_coherent, |
@@ -2740,6 +2749,7 @@ static const struct dma_map_ops amd_iommu_dma_ops = { | |||
2740 | .map_sg = map_sg, | 2749 | .map_sg = map_sg, |
2741 | .unmap_sg = unmap_sg, | 2750 | .unmap_sg = unmap_sg, |
2742 | .dma_supported = amd_iommu_dma_supported, | 2751 | .dma_supported = amd_iommu_dma_supported, |
2752 | .mapping_error = amd_iommu_mapping_error, | ||
2743 | }; | 2753 | }; |
2744 | 2754 | ||
2745 | static int init_reserved_iova_ranges(void) | 2755 | static int init_reserved_iova_ranges(void) |
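With the global DMA_ERROR_CODE gone, each dma_map_ops instance now carries its own error cookie and reports it via ->mapping_error, as amd_iommu does above with AMD_IOMMU_MAPPING_ERROR and dma-iommu does below with IOMMU_MAPPING_ERROR. Reduced to the error-reporting part, such an implementation looks roughly like this (names invented for illustration):

    #include <linux/dma-mapping.h>

    #define EXAMPLE_MAPPING_ERROR   0       /* per-implementation sentinel */

    static int example_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            return dma_addr == EXAMPLE_MAPPING_ERROR;
    }

    static const struct dma_map_ops example_dma_ops = {
            /* .alloc, .free, .map_page, .map_sg, ... omitted in this sketch */
            .mapping_error  = example_mapping_error,
    };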
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 62618e77bedc..9403336f1fa6 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -31,6 +31,8 @@ | |||
31 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
32 | #include <linux/vmalloc.h> | 32 | #include <linux/vmalloc.h> |
33 | 33 | ||
34 | #define IOMMU_MAPPING_ERROR 0 | ||
35 | |||
34 | struct iommu_dma_msi_page { | 36 | struct iommu_dma_msi_page { |
35 | struct list_head list; | 37 | struct list_head list; |
36 | dma_addr_t iova; | 38 | dma_addr_t iova; |
@@ -500,7 +502,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size, | |||
500 | { | 502 | { |
501 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size); | 503 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size); |
502 | __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); | 504 | __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); |
503 | *handle = DMA_ERROR_CODE; | 505 | *handle = IOMMU_MAPPING_ERROR; |
504 | } | 506 | } |
505 | 507 | ||
506 | /** | 508 | /** |
@@ -533,7 +535,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | |||
533 | dma_addr_t iova; | 535 | dma_addr_t iova; |
534 | unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; | 536 | unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; |
535 | 537 | ||
536 | *handle = DMA_ERROR_CODE; | 538 | *handle = IOMMU_MAPPING_ERROR; |
537 | 539 | ||
538 | min_size = alloc_sizes & -alloc_sizes; | 540 | min_size = alloc_sizes & -alloc_sizes; |
539 | if (min_size < PAGE_SIZE) { | 541 | if (min_size < PAGE_SIZE) { |
@@ -627,11 +629,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, | |||
627 | 629 | ||
628 | iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); | 630 | iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); |
629 | if (!iova) | 631 | if (!iova) |
630 | return DMA_ERROR_CODE; | 632 | return IOMMU_MAPPING_ERROR; |
631 | 633 | ||
632 | if (iommu_map(domain, iova, phys - iova_off, size, prot)) { | 634 | if (iommu_map(domain, iova, phys - iova_off, size, prot)) { |
633 | iommu_dma_free_iova(cookie, iova, size); | 635 | iommu_dma_free_iova(cookie, iova, size); |
634 | return DMA_ERROR_CODE; | 636 | return IOMMU_MAPPING_ERROR; |
635 | } | 637 | } |
636 | return iova + iova_off; | 638 | return iova + iova_off; |
637 | } | 639 | } |
@@ -671,7 +673,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
671 | 673 | ||
672 | s->offset += s_iova_off; | 674 | s->offset += s_iova_off; |
673 | s->length = s_length; | 675 | s->length = s_length; |
674 | sg_dma_address(s) = DMA_ERROR_CODE; | 676 | sg_dma_address(s) = IOMMU_MAPPING_ERROR; |
675 | sg_dma_len(s) = 0; | 677 | sg_dma_len(s) = 0; |
676 | 678 | ||
677 | /* | 679 | /* |
@@ -714,11 +716,11 @@ static void __invalidate_sg(struct scatterlist *sg, int nents) | |||
714 | int i; | 716 | int i; |
715 | 717 | ||
716 | for_each_sg(sg, s, nents, i) { | 718 | for_each_sg(sg, s, nents, i) { |
717 | if (sg_dma_address(s) != DMA_ERROR_CODE) | 719 | if (sg_dma_address(s) != IOMMU_MAPPING_ERROR) |
718 | s->offset += sg_dma_address(s); | 720 | s->offset += sg_dma_address(s); |
719 | if (sg_dma_len(s)) | 721 | if (sg_dma_len(s)) |
720 | s->length = sg_dma_len(s); | 722 | s->length = sg_dma_len(s); |
721 | sg_dma_address(s) = DMA_ERROR_CODE; | 723 | sg_dma_address(s) = IOMMU_MAPPING_ERROR; |
722 | sg_dma_len(s) = 0; | 724 | sg_dma_len(s) = 0; |
723 | } | 725 | } |
724 | } | 726 | } |
@@ -836,7 +838,7 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, | |||
836 | 838 | ||
837 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 839 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
838 | { | 840 | { |
839 | return dma_addr == DMA_ERROR_CODE; | 841 | return dma_addr == IOMMU_MAPPING_ERROR; |
840 | } | 842 | } |
841 | 843 | ||
842 | static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, | 844 | static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 8500deda9175..1e95475883cd 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3981,6 +3981,9 @@ struct dma_map_ops intel_dma_ops = { | |||
3981 | .map_page = intel_map_page, | 3981 | .map_page = intel_map_page, |
3982 | .unmap_page = intel_unmap_page, | 3982 | .unmap_page = intel_unmap_page, |
3983 | .mapping_error = intel_mapping_error, | 3983 | .mapping_error = intel_mapping_error, |
3984 | #ifdef CONFIG_X86 | ||
3985 | .dma_supported = x86_dma_supported, | ||
3986 | #endif | ||
3984 | }; | 3987 | }; |
3985 | 3988 | ||
3986 | static inline int iommu_domain_cache_init(void) | 3989 | static inline int iommu_domain_cache_init(void) |
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3e0a695537e2..d17c2b03f580 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -469,56 +469,6 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) | |||
469 | } | 469 | } |
470 | } | 470 | } |
471 | 471 | ||
472 | static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | ||
473 | { | ||
474 | int i; | ||
475 | struct device *dev = &adapter->vdev->dev; | ||
476 | |||
477 | if (adapter->buffer_list_addr != NULL) { | ||
478 | if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { | ||
479 | dma_unmap_single(dev, adapter->buffer_list_dma, 4096, | ||
480 | DMA_BIDIRECTIONAL); | ||
481 | adapter->buffer_list_dma = DMA_ERROR_CODE; | ||
482 | } | ||
483 | free_page((unsigned long)adapter->buffer_list_addr); | ||
484 | adapter->buffer_list_addr = NULL; | ||
485 | } | ||
486 | |||
487 | if (adapter->filter_list_addr != NULL) { | ||
488 | if (!dma_mapping_error(dev, adapter->filter_list_dma)) { | ||
489 | dma_unmap_single(dev, adapter->filter_list_dma, 4096, | ||
490 | DMA_BIDIRECTIONAL); | ||
491 | adapter->filter_list_dma = DMA_ERROR_CODE; | ||
492 | } | ||
493 | free_page((unsigned long)adapter->filter_list_addr); | ||
494 | adapter->filter_list_addr = NULL; | ||
495 | } | ||
496 | |||
497 | if (adapter->rx_queue.queue_addr != NULL) { | ||
498 | dma_free_coherent(dev, adapter->rx_queue.queue_len, | ||
499 | adapter->rx_queue.queue_addr, | ||
500 | adapter->rx_queue.queue_dma); | ||
501 | adapter->rx_queue.queue_addr = NULL; | ||
502 | } | ||
503 | |||
504 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) | ||
505 | if (adapter->rx_buff_pool[i].active) | ||
506 | ibmveth_free_buffer_pool(adapter, | ||
507 | &adapter->rx_buff_pool[i]); | ||
508 | |||
509 | if (adapter->bounce_buffer != NULL) { | ||
510 | if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { | ||
511 | dma_unmap_single(&adapter->vdev->dev, | ||
512 | adapter->bounce_buffer_dma, | ||
513 | adapter->netdev->mtu + IBMVETH_BUFF_OH, | ||
514 | DMA_BIDIRECTIONAL); | ||
515 | adapter->bounce_buffer_dma = DMA_ERROR_CODE; | ||
516 | } | ||
517 | kfree(adapter->bounce_buffer); | ||
518 | adapter->bounce_buffer = NULL; | ||
519 | } | ||
520 | } | ||
521 | |||
522 | static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, | 472 | static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, |
523 | union ibmveth_buf_desc rxq_desc, u64 mac_address) | 473 | union ibmveth_buf_desc rxq_desc, u64 mac_address) |
524 | { | 474 | { |
@@ -575,14 +525,17 @@ static int ibmveth_open(struct net_device *netdev) | |||
575 | for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) | 525 | for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
576 | rxq_entries += adapter->rx_buff_pool[i].size; | 526 | rxq_entries += adapter->rx_buff_pool[i].size; |
577 | 527 | ||
528 | rc = -ENOMEM; | ||
578 | adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); | 529 | adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); |
579 | adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); | 530 | if (!adapter->buffer_list_addr) { |
531 | netdev_err(netdev, "unable to allocate list pages\n"); | ||
532 | goto out; | ||
533 | } | ||
580 | 534 | ||
581 | if (!adapter->buffer_list_addr || !adapter->filter_list_addr) { | 535 | adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); |
582 | netdev_err(netdev, "unable to allocate filter or buffer list " | 536 | if (!adapter->filter_list_addr) { |
583 | "pages\n"); | 537 | netdev_err(netdev, "unable to allocate filter pages\n"); |
584 | rc = -ENOMEM; | 538 | goto out_free_buffer_list; |
585 | goto err_out; | ||
586 | } | 539 | } |
587 | 540 | ||
588 | dev = &adapter->vdev->dev; | 541 | dev = &adapter->vdev->dev; |
@@ -592,22 +545,21 @@ static int ibmveth_open(struct net_device *netdev) | |||
592 | adapter->rx_queue.queue_addr = | 545 | adapter->rx_queue.queue_addr = |
593 | dma_alloc_coherent(dev, adapter->rx_queue.queue_len, | 546 | dma_alloc_coherent(dev, adapter->rx_queue.queue_len, |
594 | &adapter->rx_queue.queue_dma, GFP_KERNEL); | 547 | &adapter->rx_queue.queue_dma, GFP_KERNEL); |
595 | if (!adapter->rx_queue.queue_addr) { | 548 | if (!adapter->rx_queue.queue_addr) |
596 | rc = -ENOMEM; | 549 | goto out_free_filter_list; |
597 | goto err_out; | ||
598 | } | ||
599 | 550 | ||
600 | adapter->buffer_list_dma = dma_map_single(dev, | 551 | adapter->buffer_list_dma = dma_map_single(dev, |
601 | adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); | 552 | adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); |
553 | if (dma_mapping_error(dev, adapter->buffer_list_dma)) { | ||
554 | netdev_err(netdev, "unable to map buffer list pages\n"); | ||
555 | goto out_free_queue_mem; | ||
556 | } | ||
557 | |||
602 | adapter->filter_list_dma = dma_map_single(dev, | 558 | adapter->filter_list_dma = dma_map_single(dev, |
603 | adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); | 559 | adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); |
604 | 560 | if (dma_mapping_error(dev, adapter->filter_list_dma)) { | |
605 | if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || | 561 | netdev_err(netdev, "unable to map filter list pages\n"); |
606 | (dma_mapping_error(dev, adapter->filter_list_dma))) { | 562 | goto out_unmap_buffer_list; |
607 | netdev_err(netdev, "unable to map filter or buffer list " | ||
608 | "pages\n"); | ||
609 | rc = -ENOMEM; | ||
610 | goto err_out; | ||
611 | } | 563 | } |
612 | 564 | ||
613 | adapter->rx_queue.index = 0; | 565 | adapter->rx_queue.index = 0; |
@@ -638,7 +590,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
638 | rxq_desc.desc, | 590 | rxq_desc.desc, |
639 | mac_address); | 591 | mac_address); |
640 | rc = -ENONET; | 592 | rc = -ENONET; |
641 | goto err_out; | 593 | goto out_unmap_filter_list; |
642 | } | 594 | } |
643 | 595 | ||
644 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { | 596 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
@@ -648,7 +600,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
648 | netdev_err(netdev, "unable to alloc pool\n"); | 600 | netdev_err(netdev, "unable to alloc pool\n"); |
649 | adapter->rx_buff_pool[i].active = 0; | 601 | adapter->rx_buff_pool[i].active = 0; |
650 | rc = -ENOMEM; | 602 | rc = -ENOMEM; |
651 | goto err_out; | 603 | goto out_free_buffer_pools; |
652 | } | 604 | } |
653 | } | 605 | } |
654 | 606 | ||
@@ -662,22 +614,21 @@ static int ibmveth_open(struct net_device *netdev) | |||
662 | lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); | 614 | lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); |
663 | } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); | 615 | } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); |
664 | 616 | ||
665 | goto err_out; | 617 | goto out_free_buffer_pools; |
666 | } | 618 | } |
667 | 619 | ||
620 | rc = -ENOMEM; | ||
668 | adapter->bounce_buffer = | 621 | adapter->bounce_buffer = |
669 | kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); | 622 | kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); |
670 | if (!adapter->bounce_buffer) { | 623 | if (!adapter->bounce_buffer) |
671 | rc = -ENOMEM; | 624 | goto out_free_irq; |
672 | goto err_out_free_irq; | 625 | |
673 | } | ||
674 | adapter->bounce_buffer_dma = | 626 | adapter->bounce_buffer_dma = |
675 | dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, | 627 | dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, |
676 | netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); | 628 | netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); |
677 | if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { | 629 | if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { |
678 | netdev_err(netdev, "unable to map bounce buffer\n"); | 630 | netdev_err(netdev, "unable to map bounce buffer\n"); |
679 | rc = -ENOMEM; | 631 | goto out_free_bounce_buffer; |
680 | goto err_out_free_irq; | ||
681 | } | 632 | } |
682 | 633 | ||
683 | netdev_dbg(netdev, "initial replenish cycle\n"); | 634 | netdev_dbg(netdev, "initial replenish cycle\n"); |
@@ -689,10 +640,31 @@ static int ibmveth_open(struct net_device *netdev) | |||
689 | 640 | ||
690 | return 0; | 641 | return 0; |
691 | 642 | ||
692 | err_out_free_irq: | 643 | out_free_bounce_buffer: |
644 | kfree(adapter->bounce_buffer); | ||
645 | out_free_irq: | ||
693 | free_irq(netdev->irq, netdev); | 646 | free_irq(netdev->irq, netdev); |
694 | err_out: | 647 | out_free_buffer_pools: |
695 | ibmveth_cleanup(adapter); | 648 | while (--i >= 0) { |
649 | if (adapter->rx_buff_pool[i].active) | ||
650 | ibmveth_free_buffer_pool(adapter, | ||
651 | &adapter->rx_buff_pool[i]); | ||
652 | } | ||
653 | out_unmap_filter_list: | ||
654 | dma_unmap_single(dev, adapter->filter_list_dma, 4096, | ||
655 | DMA_BIDIRECTIONAL); | ||
656 | out_unmap_buffer_list: | ||
657 | dma_unmap_single(dev, adapter->buffer_list_dma, 4096, | ||
658 | DMA_BIDIRECTIONAL); | ||
659 | out_free_queue_mem: | ||
660 | dma_free_coherent(dev, adapter->rx_queue.queue_len, | ||
661 | adapter->rx_queue.queue_addr, | ||
662 | adapter->rx_queue.queue_dma); | ||
663 | out_free_filter_list: | ||
664 | free_page((unsigned long)adapter->filter_list_addr); | ||
665 | out_free_buffer_list: | ||
666 | free_page((unsigned long)adapter->buffer_list_addr); | ||
667 | out: | ||
696 | napi_disable(&adapter->napi); | 668 | napi_disable(&adapter->napi); |
697 | return rc; | 669 | return rc; |
698 | } | 670 | } |
@@ -700,7 +672,9 @@ err_out: | |||
700 | static int ibmveth_close(struct net_device *netdev) | 672 | static int ibmveth_close(struct net_device *netdev) |
701 | { | 673 | { |
702 | struct ibmveth_adapter *adapter = netdev_priv(netdev); | 674 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
675 | struct device *dev = &adapter->vdev->dev; | ||
703 | long lpar_rc; | 676 | long lpar_rc; |
677 | int i; | ||
704 | 678 | ||
705 | netdev_dbg(netdev, "close starting\n"); | 679 | netdev_dbg(netdev, "close starting\n"); |
706 | 680 | ||
@@ -724,7 +698,27 @@ static int ibmveth_close(struct net_device *netdev) | |||
724 | 698 | ||
725 | ibmveth_update_rx_no_buffer(adapter); | 699 | ibmveth_update_rx_no_buffer(adapter); |
726 | 700 | ||
727 | ibmveth_cleanup(adapter); | 701 | dma_unmap_single(dev, adapter->buffer_list_dma, 4096, |
702 | DMA_BIDIRECTIONAL); | ||
703 | free_page((unsigned long)adapter->buffer_list_addr); | ||
704 | |||
705 | dma_unmap_single(dev, adapter->filter_list_dma, 4096, | ||
706 | DMA_BIDIRECTIONAL); | ||
707 | free_page((unsigned long)adapter->filter_list_addr); | ||
708 | |||
709 | dma_free_coherent(dev, adapter->rx_queue.queue_len, | ||
710 | adapter->rx_queue.queue_addr, | ||
711 | adapter->rx_queue.queue_dma); | ||
712 | |||
713 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) | ||
714 | if (adapter->rx_buff_pool[i].active) | ||
715 | ibmveth_free_buffer_pool(adapter, | ||
716 | &adapter->rx_buff_pool[i]); | ||
717 | |||
718 | dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma, | ||
719 | adapter->netdev->mtu + IBMVETH_BUFF_OH, | ||
720 | DMA_BIDIRECTIONAL); | ||
721 | kfree(adapter->bounce_buffer); | ||
728 | 722 | ||
729 | netdev_dbg(netdev, "close complete\n"); | 723 | netdev_dbg(netdev, "close complete\n"); |
730 | 724 | ||
@@ -1719,11 +1713,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
1719 | } | 1713 | } |
1720 | 1714 | ||
1721 | netdev_dbg(netdev, "adapter @ 0x%p\n", adapter); | 1715 | netdev_dbg(netdev, "adapter @ 0x%p\n", adapter); |
1722 | |||
1723 | adapter->buffer_list_dma = DMA_ERROR_CODE; | ||
1724 | adapter->filter_list_dma = DMA_ERROR_CODE; | ||
1725 | adapter->rx_queue.queue_dma = DMA_ERROR_CODE; | ||
1726 | |||
1727 | netdev_dbg(netdev, "registering netdev...\n"); | 1716 | netdev_dbg(netdev, "registering netdev...\n"); |
1728 | 1717 | ||
1729 | ibmveth_set_features(netdev, netdev->features); | 1718 | ibmveth_set_features(netdev, netdev->features); |
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 35df2c1a8a63..8de42f617d16 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -532,10 +532,6 @@ failed: | |||
532 | clk_disable_unprepare(fbdev->lcdclk); | 532 | clk_disable_unprepare(fbdev->lcdclk); |
533 | clk_put(fbdev->lcdclk); | 533 | clk_put(fbdev->lcdclk); |
534 | } | 534 | } |
535 | if (fbdev->fb_mem) { | ||
536 | dma_free_noncoherent(&dev->dev, fbdev->fb_len, fbdev->fb_mem, | ||
537 | fbdev->fb_phys); | ||
538 | } | ||
539 | if (fbdev->info.cmap.len != 0) { | 535 | if (fbdev->info.cmap.len != 0) { |
540 | fb_dealloc_cmap(&fbdev->info.cmap); | 536 | fb_dealloc_cmap(&fbdev->info.cmap); |
541 | } | 537 | } |
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 6c2b2ca4a909..5f04b4096c42 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1694,9 +1694,10 @@ static int au1200fb_drv_probe(struct platform_device *dev) | |||
1694 | /* Allocate the framebuffer to the maximum screen size */ | 1694 | /* Allocate the framebuffer to the maximum screen size */ |
1695 | fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8; | 1695 | fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8; |
1696 | 1696 | ||
1697 | fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev, | 1697 | fbdev->fb_mem = dmam_alloc_attrs(&dev->dev, |
1698 | PAGE_ALIGN(fbdev->fb_len), | 1698 | PAGE_ALIGN(fbdev->fb_len), |
1699 | &fbdev->fb_phys, GFP_KERNEL); | 1699 | &fbdev->fb_phys, GFP_KERNEL, |
1700 | DMA_ATTR_NON_CONSISTENT); | ||
1700 | if (!fbdev->fb_mem) { | 1701 | if (!fbdev->fb_mem) { |
1701 | print_err("fail to allocate frambuffer (size: %dK))", | 1702 | print_err("fail to allocate frambuffer (size: %dK))", |
1702 | fbdev->fb_len / 1024); | 1703 | fbdev->fb_len / 1024); |
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8dab0d3dc172..82fc54f8eb77 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -67,6 +67,8 @@ static unsigned long dma_alloc_coherent_mask(struct device *dev, | |||
67 | } | 67 | } |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | #define XEN_SWIOTLB_ERROR_CODE (~(dma_addr_t)0x0) | ||
71 | |||
70 | static char *xen_io_tlb_start, *xen_io_tlb_end; | 72 | static char *xen_io_tlb_start, *xen_io_tlb_end; |
71 | static unsigned long xen_io_tlb_nslabs; | 73 | static unsigned long xen_io_tlb_nslabs; |
72 | /* | 74 | /* |
@@ -295,7 +297,8 @@ error:
 	free_pages((unsigned long)xen_io_tlb_start, order);
 	return rc;
 }
-void *
+
+static void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flags,
 			   unsigned long attrs)
@@ -346,9 +349,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	memset(ret, 0, size);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
 
-void
+static void
 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 			  dma_addr_t dev_addr, unsigned long attrs)
 {
@@ -369,8 +371,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 
 	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
-
 
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode. The
@@ -379,7 +379,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
  * Once the device is given the dma address, the device owns this memory until
  * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
  */
-dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 				unsigned long offset, size_t size,
 				enum dma_data_direction dir,
 				unsigned long attrs)
@@ -412,7 +412,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
 				     attrs);
 	if (map == SWIOTLB_MAP_ERROR)
-		return DMA_ERROR_CODE;
+		return XEN_SWIOTLB_ERROR_CODE;
 
 	dev_addr = xen_phys_to_bus(map);
 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
@@ -427,9 +427,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-	return DMA_ERROR_CODE;
+	return XEN_SWIOTLB_ERROR_CODE;
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
 
 /*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -467,13 +466,12 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 		dma_mark_clean(phys_to_virt(paddr), size);
 }
 
-void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			    size_t size, enum dma_data_direction dir,
 			    unsigned long attrs)
 {
 	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
 
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
@@ -516,7 +514,6 @@ xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
 {
 	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);
 
 void
 xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -524,7 +521,25 @@ xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
 {
 	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
+
+/*
+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
+ */
+static void
+xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+			   int nelems, enum dma_data_direction dir,
+			   unsigned long attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	BUG_ON(dir == DMA_NONE);
+
+	for_each_sg(sgl, sg, nelems, i)
+		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
+
+}
 
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
@@ -542,7 +557,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
  * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
  * same here.
  */
-int
+static int
 xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 			 int nelems, enum dma_data_direction dir,
 			 unsigned long attrs)
@@ -599,27 +614,6 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	}
 	return nelems;
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
-
-/*
- * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
- */
-void
-xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-			   int nelems, enum dma_data_direction dir,
-			   unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(dir == DMA_NONE);
-
-	for_each_sg(sgl, sg, nelems, i)
-		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
-
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
 
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
@@ -641,21 +635,19 @@ xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 			sg_dma_len(sg), dir, target);
 }
 
-void
+static void
 xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 			    int nelems, enum dma_data_direction dir)
 {
 	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);
 
-void
+static void
 xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 			       int nelems, enum dma_data_direction dir)
 {
 	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -663,31 +655,18 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
  * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
-int
+static int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
 	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
-
-int
-xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
 
 /*
  * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
-int
+static int
 xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		     unsigned long attrs)
@@ -699,13 +678,12 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 #endif
 	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
 
 /*
  * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
-int
+static int
 xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 			void *cpu_addr, dma_addr_t handle, size_t size,
 			unsigned long attrs)
@@ -727,4 +705,25 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 #endif
 	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
 }
-EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);
+
+static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == XEN_SWIOTLB_ERROR_CODE;
+}
+
+const struct dma_map_ops xen_swiotlb_dma_ops = {
+	.alloc = xen_swiotlb_alloc_coherent,
+	.free = xen_swiotlb_free_coherent,
+	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+	.map_sg = xen_swiotlb_map_sg_attrs,
+	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
+	.map_page = xen_swiotlb_map_page,
+	.unmap_page = xen_swiotlb_unmap_page,
+	.dma_supported = xen_swiotlb_dma_supported,
+	.mmap = xen_swiotlb_dma_mmap,
+	.get_sgtable = xen_swiotlb_get_sgtable,
+	.mapping_error = xen_swiotlb_mapping_error,
+};
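
The new ops table ties this file back to the series-wide mapping_error change: instead of comparing against a global DMA_ERROR_CODE, generic code asks each dma_map_ops instance whether an address is its own error sentinel. A rough sketch of that dispatch, simplified from the generic dma-mapping helper of this era rather than copied verbatim:

	/* Simplified: the real dma_mapping_error() in include/linux/dma-mapping.h
	 * also runs DMA debug checks before consulting the ops. */
	static inline int example_dma_mapping_error(struct device *dev, dma_addr_t addr)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		if (ops->mapping_error)
			return ops->mapping_error(dev, addr);	/* e.g. xen_swiotlb_mapping_error() */
		return 0;	/* ops without the hook never report an error */
	}

For xen-swiotlb this means a failed xen_swiotlb_map_page() returns XEN_SWIOTLB_ERROR_CODE, which dma_mapping_error() recognizes through the .mapping_error callback registered above.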