summaryrefslogtreecommitdiffstats
path: root/drivers/base/dma-mapping.c
diff options
context:
space:
mode:
author: Linus Torvalds <torvalds@linux-foundation.org> 2017-07-06 22:20:54 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2017-07-06 22:20:54 -0400
commit: f72e24a1240b78f421649c4d88f5c24ab1c896a1 (patch)
tree: 90bed3bf33ae0abf5636dafcc3eda3cc354612b0 /drivers/base/dma-mapping.c
parent: 2c669275dc3245e2866a0eea15bda8ec8d1ab8db (diff)
parent: 1655cf8829d82d367d8fdb5cb58e5885d7d2a391 (diff)
Merge tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping infrastructure from Christoph Hellwig:
 "This is the first pull request for the new dma-mapping subsystem

  In this new subsystem we'll try to properly maintain all the generic
  code related to dma-mapping, and will further consolidate arch code
  into common helpers.

  This pull request contains:

   - removal of the DMA_ERROR_CODE macro, replacing it with calls to
     ->mapping_error so that the dma_map_ops instances are more self
     contained and can be shared across architectures (me)

   - removal of the ->set_dma_mask method, which duplicates the
     ->dma_capable one in terms of functionality, but requires more
     duplicate code.

   - various updates for the coherent dma pool and related arm code
     (Vladimir)

   - various smaller cleanups (me)"

* tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping: (56 commits)
  ARM: dma-mapping: Remove traces of NOMMU code
  ARM: NOMMU: Set ARM_DMA_MEM_BUFFERABLE for M-class cpus
  ARM: NOMMU: Introduce dma operations for noMMU
  drivers: dma-mapping: allow dma_common_mmap() for NOMMU
  drivers: dma-coherent: Introduce default DMA pool
  drivers: dma-coherent: Account dma_pfn_offset when used with device tree
  dma: Take into account dma_pfn_offset
  dma-mapping: replace dmam_alloc_noncoherent with dmam_alloc_attrs
  dma-mapping: remove dmam_free_noncoherent
  crypto: qat - avoid an uninitialized variable warning
  au1100fb: remove a bogus dma_free_nonconsistent call
  MAINTAINERS: add entry for dma mapping helpers
  powerpc: merge __dma_set_mask into dma_set_mask
  dma-mapping: remove the set_dma_mask method
  powerpc/cell: use the dma_supported method for ops switching
  powerpc/cell: clean up fixed mapping dma_ops initialization
  tile: remove dma_supported and mapping_error methods
  xen-swiotlb: remove xen_swiotlb_set_dma_mask
  arm: implement ->dma_supported instead of ->set_dma_mask
  mips/loongson64: implement ->dma_supported instead of ->set_dma_mask
  ...
Diffstat (limited to 'drivers/base/dma-mapping.c')
-rw-r--r-- drivers/base/dma-mapping.c | 60
1 file changed, 18 insertions(+), 42 deletions(-)
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 9dbef4d1baa4..5096755d185e 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -22,20 +22,15 @@ struct dma_devres {
22 size_t size; 22 size_t size;
23 void *vaddr; 23 void *vaddr;
24 dma_addr_t dma_handle; 24 dma_addr_t dma_handle;
25 unsigned long attrs;
25}; 26};
26 27
27static void dmam_coherent_release(struct device *dev, void *res) 28static void dmam_release(struct device *dev, void *res)
28{ 29{
29 struct dma_devres *this = res; 30 struct dma_devres *this = res;
30 31
31 dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle); 32 dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
32} 33 this->attrs);
33
34static void dmam_noncoherent_release(struct device *dev, void *res)
35{
36 struct dma_devres *this = res;
37
38 dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
39} 34}
40 35
41static int dmam_match(struct device *dev, void *res, void *match_data) 36static int dmam_match(struct device *dev, void *res, void *match_data)
@@ -69,7 +64,7 @@ void *dmam_alloc_coherent(struct device *dev, size_t size,
69 struct dma_devres *dr; 64 struct dma_devres *dr;
70 void *vaddr; 65 void *vaddr;
71 66
72 dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp); 67 dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
73 if (!dr) 68 if (!dr)
74 return NULL; 69 return NULL;
75 70
@@ -104,35 +99,35 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
104 struct dma_devres match_data = { size, vaddr, dma_handle }; 99 struct dma_devres match_data = { size, vaddr, dma_handle };
105 100
106 dma_free_coherent(dev, size, vaddr, dma_handle); 101 dma_free_coherent(dev, size, vaddr, dma_handle);
107 WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match, 102 WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
108 &match_data));
109} 103}
110EXPORT_SYMBOL(dmam_free_coherent); 104EXPORT_SYMBOL(dmam_free_coherent);
111 105
112/** 106/**
113 * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent() 107 * dmam_alloc_attrs - Managed dma_alloc_attrs()
114 * @dev: Device to allocate non_coherent memory for 108 * @dev: Device to allocate non_coherent memory for
115 * @size: Size of allocation 109 * @size: Size of allocation
116 * @dma_handle: Out argument for allocated DMA handle 110 * @dma_handle: Out argument for allocated DMA handle
117 * @gfp: Allocation flags 111 * @gfp: Allocation flags
112 * @attrs: Flags in the DMA_ATTR_* namespace.
118 * 113 *
119 * Managed dma_alloc_noncoherent(). Memory allocated using this 114 * Managed dma_alloc_attrs(). Memory allocated using this function will be
120 * function will be automatically released on driver detach. 115 * automatically released on driver detach.
121 * 116 *
122 * RETURNS: 117 * RETURNS:
123 * Pointer to allocated memory on success, NULL on failure. 118 * Pointer to allocated memory on success, NULL on failure.
124 */ 119 */
125void *dmam_alloc_noncoherent(struct device *dev, size_t size, 120void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
126 dma_addr_t *dma_handle, gfp_t gfp) 121 gfp_t gfp, unsigned long attrs)
127{ 122{
128 struct dma_devres *dr; 123 struct dma_devres *dr;
129 void *vaddr; 124 void *vaddr;
130 125
131 dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp); 126 dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
132 if (!dr) 127 if (!dr)
133 return NULL; 128 return NULL;
134 129
135 vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp); 130 vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
136 if (!vaddr) { 131 if (!vaddr) {
137 devres_free(dr); 132 devres_free(dr);
138 return NULL; 133 return NULL;
@@ -141,32 +136,13 @@ void *dmam_alloc_noncoherent(struct device *dev, size_t size,
141 dr->vaddr = vaddr; 136 dr->vaddr = vaddr;
142 dr->dma_handle = *dma_handle; 137 dr->dma_handle = *dma_handle;
143 dr->size = size; 138 dr->size = size;
139 dr->attrs = attrs;
144 140
145 devres_add(dev, dr); 141 devres_add(dev, dr);
146 142
147 return vaddr; 143 return vaddr;
148} 144}
149EXPORT_SYMBOL(dmam_alloc_noncoherent); 145EXPORT_SYMBOL(dmam_alloc_attrs);
150
151/**
152 * dmam_free_coherent - Managed dma_free_noncoherent()
153 * @dev: Device to free noncoherent memory for
154 * @size: Size of allocation
155 * @vaddr: Virtual address of the memory to free
156 * @dma_handle: DMA handle of the memory to free
157 *
158 * Managed dma_free_noncoherent().
159 */
160void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
161 dma_addr_t dma_handle)
162{
163 struct dma_devres match_data = { size, vaddr, dma_handle };
164
165 dma_free_noncoherent(dev, size, vaddr, dma_handle);
166 WARN_ON(!devres_destroy(dev, dmam_noncoherent_release, dmam_match,
167 &match_data));
168}
169EXPORT_SYMBOL(dmam_free_noncoherent);
170 146
171#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT 147#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
172 148
@@ -251,7 +227,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
251 void *cpu_addr, dma_addr_t dma_addr, size_t size) 227 void *cpu_addr, dma_addr_t dma_addr, size_t size)
252{ 228{
253 int ret = -ENXIO; 229 int ret = -ENXIO;
254#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) 230#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
255 unsigned long user_count = vma_pages(vma); 231 unsigned long user_count = vma_pages(vma);
256 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; 232 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
257 unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); 233 unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
@@ -268,7 +244,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
268 user_count << PAGE_SHIFT, 244 user_count << PAGE_SHIFT,
269 vma->vm_page_prot); 245 vma->vm_page_prot);
270 } 246 }
271#endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ 247#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
272 248
273 return ret; 249 return ret;
274} 250}