Diffstat (limited to 'drivers/base/dma-coherent.c')
 drivers/base/dma-coherent.c | 164
 1 file changed, 106 insertions(+), 58 deletions(-)
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c..1c152aed6b82 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
 {
 	if (dev && dev->dma_mem)
 		return dev->dma_mem;
-	return dma_coherent_default_memory;
+	return NULL;
 }
 
 static inline dma_addr_t dma_get_device_base(struct device *dev,
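
With the implicit fallback to dma_coherent_default_memory removed, a device without its own pool now yields NULL here; the global pool has to be requested explicitly through the dma_*_from_global_coherent() helpers introduced further down in this patch. A minimal caller-side sketch of the resulting split (the wrapper function and its fallback policy are hypothetical, not part of this patch):

	/* Hypothetical caller: the device pool and the global pool are
	 * now two explicit, separate steps. */
	static void *try_coherent_pools(struct device *dev, size_t size,
					dma_addr_t *dma_handle)
	{
		void *vaddr;

		/* Nonzero return: the per-device pool handled the request. */
		if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
			return vaddr;

		/* The default pool is no longer reached implicitly via
		 * dev_get_coherent_memory(); ask for it by name. */
		return dma_alloc_from_global_coherent(size, dma_handle);
	}
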
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev:	device from which we allocate memory
- * @size:	size of requested memory area
- * @dma_handle:	This will be filled with the correct dma handle
- * @ret:	This pointer will be filled with the virtual address
- *		to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
-				       dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+		ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 	int order = get_order(size);
 	unsigned long flags;
 	int pageno;
 	int dma_memory_map;
+	void *ret;
 
-	if (!mem)
-		return 0;
-
-	*ret = NULL;
 	spin_lock_irqsave(&mem->spinlock, flags);
 
 	if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 		goto err;
 
 	/*
-	 * Memory was found in the per-device area.
+	 * Memory was found in the coherent area.
 	 */
-	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	ret = mem->virt_base + (pageno << PAGE_SHIFT);
 	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
 	spin_unlock_irqrestore(&mem->spinlock, flags);
 	if (dma_memory_map)
-		memset(*ret, 0, size);
+		memset(ret, 0, size);
 	else
-		memset_io(*ret, 0, size);
+		memset_io(ret, 0, size);
 
-	return 1;
+	return ret;
 
 err:
 	spin_unlock_irqrestore(&mem->spinlock, flags);
+	return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev:	device from which we allocate memory
+ * @size:	size of requested memory area
+ * @dma_handle:	This will be filled with the correct dma handle
+ * @ret:	This pointer will be filled with the virtual address
+ *		to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+		dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	if (!mem)
+		return 0;
+
+	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+	if (*ret)
+		return 1;
+
 	/*
 	 * In the case where the allocation can not be satisfied from the
 	 * per-device area, try to fall back to generic memory if the
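
The kernel-doc above preserves the old contract under the new name: a per-arch dma_alloc_coherent() tries the device pool first and falls back to generic memory only on a 0 return. A sketch of that convention, assuming a hypothetical arch wrapper (the generic path's dma_handle setup is elided):

	/* Hypothetical per-arch allocator illustrating the documented
	 * 0 / !0 return contract (illustration only). */
	static void *arch_dma_alloc_coherent(struct device *dev, size_t size,
					     dma_addr_t *dma_handle, gfp_t gfp)
	{
		void *vaddr;

		/*
		 * !0: the device pool handled the request.  vaddr may still
		 * be NULL when the pool is DMA_MEMORY_EXCLUSIVE and
		 * exhausted; the caller must not fall back in that case
		 * either.
		 */
		if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
			return vaddr;

		/* 0: continue with allocation from generic memory. */
		return (void *)__get_free_pages(gfp, get_order(size));
	}
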
@@ -225,25 +235,20 @@ err:
 	 */
 	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev:	device from which the memory was allocated
- * @order:	the order of pages allocated
- * @vaddr:	virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+	if (!dma_coherent_default_memory)
+		return NULL;
 
+	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+					 dma_handle);
+}
+
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+				       int order, void *vaddr)
+{
 	if (mem && vaddr >= mem->virt_base && vaddr <
 		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dma_release_from_coherent);
 
 /**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
  * @dev:	device from which the memory was allocated
- * @vma:	vm_area for the userspace memory
- * @vaddr:	cpu address returned by dma_alloc_from_coherent
- * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret:	result from remap_pfn_range()
+ * @order:	the order of pages allocated
+ * @vaddr:	virtual address of allocated pages
  *
  * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
  *
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
  */
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
-			   void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 {
 	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 
+	return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_release_from_coherent(dma_coherent_default_memory, order,
+					   vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
 	if (mem && vaddr >= mem->virt_base && vaddr + size <=
 		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		unsigned long off = vma->vm_pgoff;
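
The free side mirrors allocation: offer the buffer to the device pool first, and only release it generically when that returns 0. A hedged sketch of an arch free path built on the two new entry points (the wrapper is hypothetical):

	/* Hypothetical per-arch free path (illustration only). */
	static void arch_dma_free_coherent(struct device *dev, size_t size,
					   void *vaddr)
	{
		int order = get_order(size);

		/* 1: the buffer belonged to the per-device pool, now freed. */
		if (dma_release_from_dev_coherent(dev, order, vaddr))
			return;

		/* Buffers handed out by dma_alloc_from_global_coherent() are
		 * returned through the matching global helper. */
		if (dma_release_from_global_coherent(order, vaddr))
			return;

		/* 0 from both: the buffer came from generic memory. */
		free_pages((unsigned long)vaddr, order);
	}
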
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev:	device from which the memory was allocated
+ * @vma:	vm_area for the userspace memory
+ * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
+ * @size:	size of the memory buffer allocated
+ * @ret:	result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+			   void *vaddr, size_t size, int *ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+				   size_t size, int *ret)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+					vaddr, size, ret);
+}
 
 /*
  * Support for reserved memory regions defined in device tree
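
dma_mmap_from_dev_coherent() completes the pattern for userspace mappings: the remap_pfn_range() result comes back through @ret, and a 0 return tells the caller to map generic memory itself. A sketch of an arch mmap hook using the dev/global pair (hypothetical wrapper; the generic mapping path is elided):

	/* Hypothetical arch dma_mmap() hook (illustration only). */
	static int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
				 void *cpu_addr, size_t size)
	{
		int ret;

		/* 1: handled by the per-device pool; ret is the
		 * remap_pfn_range() result. */
		if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
			return ret;

		/* Same contract for buffers from the global pool. */
		if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
			return ret;

		return -ENXIO;	/* generic mapping path elided */
	}
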