author     Hans Verkuil <hansverk@cisco.com>                 2014-11-18 07:51:05 -0500
committer  Mauro Carvalho Chehab <mchehab@osg.samsung.com>   2014-11-25 06:07:08 -0500
commit     4650635069dfc9f7d59a55e048560bf05e3ca442
tree       d715373fadb1cc50334ba471a1d155001f109f7f
parent     041c7b6ac74ee7a4375faf80e2864fc2bce78edc
[media] vb2-vmalloc: add support for dmabuf exports
Add support for DMABUF exporting to the vb2-vmalloc implementation.
All memory models now have support for both importing and exporting of DMABUFs.
Signed-off-by: Hans Verkuil <hansverk@cisco.com>
Acked-by: Pawel Osciak <pawel@osciak.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
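[Editorial note: with this export path in place, a vmalloc-backed buffer can be handed to another device as a DMABUF file descriptor. The snippet below is illustrative only and is not part of this patch; it assumes a driver using vb2-vmalloc with an already-initialized V4L2_MEMORY_MMAP capture queue.]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Export buffer 'index' of an already-set-up MMAP queue as a DMABUF fd;
 * returns the fd on success or -1 on error. */
static int export_buffer(int vfd, unsigned int index)
{
	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* assumed capture queue */
	expbuf.index = index;
	expbuf.flags = O_CLOEXEC | O_RDWR;

	if (ioctl(vfd, VIDIOC_EXPBUF, &expbuf) < 0) {
		perror("VIDIOC_EXPBUF");
		return -1;
	}
	/* expbuf.fd can now be mmap()ed or passed to an importing device */
	return expbuf.fd;
}

[The returned fd can then be queued on another vb2 queue opened with V4L2_MEMORY_DMABUF.]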
-rw-r--r--   drivers/media/v4l2-core/videobuf2-vmalloc.c | 171
1 file changed, 171 insertions(+), 0 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index bba2460961df..3966b121e466 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -213,6 +213,176 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
 }
 
 /*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_vmalloc_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dma_dir;
+};
+
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+	struct dma_buf_attachment *dbuf_attach)
+{
+	struct vb2_vmalloc_attachment *attach;
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+	void *vaddr = buf->vaddr;
+	int ret;
+	int i;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return ret;
+	}
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		struct page *page = vmalloc_to_page(vaddr);
+
+		if (!page) {
+			sg_free_table(sgt);
+			kfree(attach);
+			return -ENOMEM;
+		}
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+		vaddr += PAGE_SIZE;
+	}
+
+	attach->dma_dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+	return 0;
+}
+
+static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
+{
+	struct vb2_vmalloc_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dma_dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+	struct vb2_vmalloc_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+	int ret;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dma_dir == dma_dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dma_dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+		attach->dma_dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+	if (ret <= 0) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dma_dir = dma_dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
+	vb2_vmalloc_put(dbuf->priv);
+}
+
+static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+	return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+	return buf->vaddr;
+}
+
+static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
+{
+	return vb2_vmalloc_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
+	.attach = vb2_vmalloc_dmabuf_ops_attach,
+	.detach = vb2_vmalloc_dmabuf_ops_detach,
+	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
+	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
+	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
+	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
+	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
+	.release = vb2_vmalloc_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
+
+	if (WARN_ON(!buf->vaddr))
+		return NULL;
+
+	dbuf = dma_buf_export(buf, &vb2_vmalloc_dmabuf_ops, buf->size, flags, NULL);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	atomic_inc(&buf->refcount);
+
+	return dbuf;
+}
+
+/*********************************************/
 /* callbacks for DMABUF buffers */
 /*********************************************/
 
@@ -268,6 +438,7 @@ const struct vb2_mem_ops vb2_vmalloc_memops = {
 	.put = vb2_vmalloc_put,
 	.get_userptr = vb2_vmalloc_get_userptr,
 	.put_userptr = vb2_vmalloc_put_userptr,
+	.get_dmabuf = vb2_vmalloc_get_dmabuf,
 	.map_dmabuf = vb2_vmalloc_map_dmabuf,
 	.unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
 	.attach_dmabuf = vb2_vmalloc_attach_dmabuf,
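[Editorial note: these memops only become reachable once a driver selects the vmalloc allocator for its queue. A minimal sketch follows; mydrv_init_vb2_queue and its parameters are hypothetical and not part of this patch.]

#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

/* Hypothetical helper: wire a capture queue to the vmalloc allocator so
 * that its buffers can be exported as DMABUFs via vb2_vmalloc_get_dmabuf(). */
static int mydrv_init_vb2_queue(struct vb2_queue *q, const struct vb2_ops *ops,
				void *drv_priv, struct mutex *lock)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
	q->drv_priv = drv_priv;
	q->buf_struct_size = sizeof(struct vb2_buffer);
	q->ops = ops;				/* driver's vb2_ops */
	q->mem_ops = &vb2_vmalloc_memops;	/* vmalloc allocator, now exportable */
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = lock;

	return vb2_queue_init(q);
}

[Once vb2_queue_init() succeeds, VIDIOC_EXPBUF on that queue reaches vb2_vmalloc_get_dmabuf() through the .get_dmabuf memop added above.]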