diff options
Diffstat (limited to 'drivers/media/v4l2-core/videobuf2-dma-contig.c')
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-contig.c | 40
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 4a02ade14b4f..2bdffd383572 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -229,7 +229,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) | |||
229 | 229 | ||
230 | struct vb2_dc_attachment { | 230 | struct vb2_dc_attachment { |
231 | struct sg_table sgt; | 231 | struct sg_table sgt; |
232 | enum dma_data_direction dir; | 232 | enum dma_data_direction dma_dir; |
233 | }; | 233 | }; |
234 | 234 | ||
235 | static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, | 235 | static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, |
@@ -264,7 +264,7 @@ static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, | |||
264 | wr = sg_next(wr); | 264 | wr = sg_next(wr); |
265 | } | 265 | } |
266 | 266 | ||
267 | attach->dir = DMA_NONE; | 267 | attach->dma_dir = DMA_NONE; |
268 | dbuf_attach->priv = attach; | 268 | dbuf_attach->priv = attach; |
269 | 269 | ||
270 | return 0; | 270 | return 0; |
@@ -282,16 +282,16 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf, | |||
282 | sgt = &attach->sgt; | 282 | sgt = &attach->sgt; |
283 | 283 | ||
284 | /* release the scatterlist cache */ | 284 | /* release the scatterlist cache */ |
285 | if (attach->dir != DMA_NONE) | 285 | if (attach->dma_dir != DMA_NONE) |
286 | dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, | 286 | dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, |
287 | attach->dir); | 287 | attach->dma_dir); |
288 | sg_free_table(sgt); | 288 | sg_free_table(sgt); |
289 | kfree(attach); | 289 | kfree(attach); |
290 | db_attach->priv = NULL; | 290 | db_attach->priv = NULL; |
291 | } | 291 | } |
292 | 292 | ||
293 | static struct sg_table *vb2_dc_dmabuf_ops_map( | 293 | static struct sg_table *vb2_dc_dmabuf_ops_map( |
294 | struct dma_buf_attachment *db_attach, enum dma_data_direction dir) | 294 | struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir) |
295 | { | 295 | { |
296 | struct vb2_dc_attachment *attach = db_attach->priv; | 296 | struct vb2_dc_attachment *attach = db_attach->priv; |
297 | /* stealing dmabuf mutex to serialize map/unmap operations */ | 297 | /* stealing dmabuf mutex to serialize map/unmap operations */ |
@@ -303,27 +303,27 @@ static struct sg_table *vb2_dc_dmabuf_ops_map( | |||
303 | 303 | ||
304 | sgt = &attach->sgt; | 304 | sgt = &attach->sgt; |
305 | /* return previously mapped sg table */ | 305 | /* return previously mapped sg table */ |
306 | if (attach->dir == dir) { | 306 | if (attach->dma_dir == dma_dir) { |
307 | mutex_unlock(lock); | 307 | mutex_unlock(lock); |
308 | return sgt; | 308 | return sgt; |
309 | } | 309 | } |
310 | 310 | ||
311 | /* release any previous cache */ | 311 | /* release any previous cache */ |
312 | if (attach->dir != DMA_NONE) { | 312 | if (attach->dma_dir != DMA_NONE) { |
313 | dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, | 313 | dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, |
314 | attach->dir); | 314 | attach->dma_dir); |
315 | attach->dir = DMA_NONE; | 315 | attach->dma_dir = DMA_NONE; |
316 | } | 316 | } |
317 | 317 | ||
318 | /* mapping to the client with new direction */ | 318 | /* mapping to the client with new direction */ |
319 | ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir); | 319 | ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir); |
320 | if (ret <= 0) { | 320 | if (ret <= 0) { |
321 | pr_err("failed to map scatterlist\n"); | 321 | pr_err("failed to map scatterlist\n"); |
322 | mutex_unlock(lock); | 322 | mutex_unlock(lock); |
323 | return ERR_PTR(-EIO); | 323 | return ERR_PTR(-EIO); |
324 | } | 324 | } |
325 | 325 | ||
326 | attach->dir = dir; | 326 | attach->dma_dir = dma_dir; |
327 | 327 | ||
328 | mutex_unlock(lock); | 328 | mutex_unlock(lock); |
329 | 329 | ||
@@ -331,7 +331,7 @@ static struct sg_table *vb2_dc_dmabuf_ops_map( | |||
331 | } | 331 | } |
332 | 332 | ||
333 | static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, | 333 | static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, |
334 | struct sg_table *sgt, enum dma_data_direction dir) | 334 | struct sg_table *sgt, enum dma_data_direction dma_dir) |
335 | { | 335 | { |
336 | /* nothing to be done here */ | 336 | /* nothing to be done here */ |
337 | } | 337 | } |
@@ -460,7 +460,8 @@ static int vb2_dc_get_user_pfn(unsigned long start, int n_pages, | |||
460 | } | 460 | } |
461 | 461 | ||
462 | static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, | 462 | static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, |
463 | int n_pages, struct vm_area_struct *vma, int write) | 463 | int n_pages, struct vm_area_struct *vma, |
464 | enum dma_data_direction dma_dir) | ||
464 | { | 465 | { |
465 | if (vma_is_io(vma)) { | 466 | if (vma_is_io(vma)) { |
466 | unsigned int i; | 467 | unsigned int i; |
@@ -482,7 +483,7 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, | |||
482 | int n; | 483 | int n; |
483 | 484 | ||
484 | n = get_user_pages(current, current->mm, start & PAGE_MASK, | 485 | n = get_user_pages(current, current->mm, start & PAGE_MASK, |
485 | n_pages, write, 1, pages, NULL); | 486 | n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL); |
486 | /* negative error means that no page was pinned */ | 487 | /* negative error means that no page was pinned */ |
487 | n = max(n, 0); | 488 | n = max(n, 0); |
488 | if (n != n_pages) { | 489 | if (n != n_pages) { |
@@ -551,7 +552,7 @@ static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn | |||
551 | #endif | 552 | #endif |
552 | 553 | ||
553 | static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, | 554 | static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, |
554 | unsigned long size, int write) | 555 | unsigned long size, enum dma_data_direction dma_dir) |
555 | { | 556 | { |
556 | struct vb2_dc_conf *conf = alloc_ctx; | 557 | struct vb2_dc_conf *conf = alloc_ctx; |
557 | struct vb2_dc_buf *buf; | 558 | struct vb2_dc_buf *buf; |
@@ -582,7 +583,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
582 | return ERR_PTR(-ENOMEM); | 583 | return ERR_PTR(-ENOMEM); |
583 | 584 | ||
584 | buf->dev = conf->dev; | 585 | buf->dev = conf->dev; |
585 | buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | 586 | buf->dma_dir = dma_dir; |
586 | 587 | ||
587 | start = vaddr & PAGE_MASK; | 588 | start = vaddr & PAGE_MASK; |
588 | offset = vaddr & ~PAGE_MASK; | 589 | offset = vaddr & ~PAGE_MASK; |
@@ -618,7 +619,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
618 | } | 619 | } |
619 | 620 | ||
620 | /* extract page list from userspace mapping */ | 621 | /* extract page list from userspace mapping */ |
621 | ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write); | 622 | ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, |
623 | dma_dir == DMA_FROM_DEVICE); | ||
622 | if (ret) { | 624 | if (ret) { |
623 | unsigned long pfn; | 625 | unsigned long pfn; |
624 | if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { | 626 | if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { |
@@ -782,7 +784,7 @@ static void vb2_dc_detach_dmabuf(void *mem_priv) | |||
782 | } | 784 | } |
783 | 785 | ||
784 | static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, | 786 | static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, |
785 | unsigned long size, int write) | 787 | unsigned long size, enum dma_data_direction dma_dir) |
786 | { | 788 | { |
787 | struct vb2_dc_conf *conf = alloc_ctx; | 789 | struct vb2_dc_conf *conf = alloc_ctx; |
788 | struct vb2_dc_buf *buf; | 790 | struct vb2_dc_buf *buf; |
@@ -804,7 +806,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, | |||
804 | return dba; | 806 | return dba; |
805 | } | 807 | } |
806 | 808 | ||
807 | buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | 809 | buf->dma_dir = dma_dir; |
808 | buf->size = size; | 810 | buf->size = size; |
809 | buf->db_attach = dba; | 811 | buf->db_attach = dba; |
810 | 812 | ||