author		Hans Verkuil <hans.verkuil@cisco.com>	2014-11-18 07:51:08 -0500
committer	Mauro Carvalho Chehab <mchehab@osg.samsung.com>	2014-11-25 06:09:19 -0500
commit		251a79f8f5adfd816de4e052b5e3619a5a1d0910 (patch)
tree		a0fbe2c43a495a274de4c083c42a7cc8dc99055b
parent		f5294f455afd30bdc90f31d6d0101bb773e9ddba (diff)
[media] vb2: use dma_map_sg_attrs to prevent unnecessary sync
By default dma_map_sg syncs the mapped buffer to the device. But
buf_prepare expects a buffer that is synced for the CPU, and the
buffer will be synced to the device in the prepare memop anyway.
The reverse is true for dma_unmap_sg, buf_finish and the finish
memop.
To prevent these unnecessary syncs we ask dma_(un)map_sg to skip
them.
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Acked-by: Pawel Osciak <pawel@osciak.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
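
The core of the change is the switch from dma_map_sg() to dma_map_sg_attrs()
with DMA_ATTR_SKIP_CPU_SYNC set. A minimal sketch of that pattern, assuming a
struct device *dev and an already-built struct sg_table *sgt; the helper names
are illustrative, not taken from the patch:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/*
 * Map a scatterlist without the sync to the device that a plain
 * dma_map_sg() would perform. The sync is deferred to the prepare()
 * memop, which runs right before the device touches the buffer.
 */
static int sketch_map_nosync(struct device *dev, struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
				      dir, &attrs);
	return sgt->nents > 0 ? 0 : -EIO;
}

/* The sync to the device then happens exactly once, in prepare(): */
static void sketch_prepare(struct device *dev, struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, dir);
}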
-rw-r--r--	drivers/media/v4l2-core/videobuf2-dma-contig.c	24
-rw-r--r--	drivers/media/v4l2-core/videobuf2-dma-sg.c	33
2 files changed, 47 insertions(+), 10 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 0bfc488c8812..b481d20c8372 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -511,7 +511,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
 	struct sg_table *sgt = buf->dma_sgt;
 
 	if (sgt) {
-		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+		DEFINE_DMA_ATTRS(attrs);
+
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+		/*
+		 * No need to sync to CPU, it's already synced to the CPU
+		 * since the finish() memop will have been called before this.
+		 */
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				   buf->dma_dir, &attrs);
 		if (!vma_is_io(buf->vma))
 			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
 
@@ -568,6 +576,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	struct sg_table *sgt;
 	unsigned long contig_size;
 	unsigned long dma_align = dma_get_cache_alignment();
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	/* Only cache aligned DMA transfers are reliable */
 	if (!IS_ALIGNED(vaddr | size, dma_align)) {
@@ -654,8 +665,12 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	kfree(pages);
 	pages = NULL;
 
-	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
-				buf->dma_dir);
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				      buf->dma_dir, &attrs);
 	if (sgt->nents <= 0) {
 		pr_err("failed to map scatterlist\n");
 		ret = -EIO;
@@ -677,7 +692,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	return buf;
 
 fail_map_sg:
-	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+			   buf->dma_dir, &attrs);
 
 fail_sgt_init:
 	if (!vma_is_io(buf->vma))
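
The unmap side relies on the same reasoning in the other direction: by the
time a buffer is released, the finish() memop has already synced it back for
the CPU, so the implicit CPU sync in dma_unmap_sg() would be redundant. A
sketch of that pairing, with the same illustrative names as above:

/* finish() memop: hand the buffer back to the CPU once DMA is done. */
static void sketch_finish(struct device *dev, struct sg_table *sgt,
			  enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, dir);
}

/* Teardown: the CPU sync already happened in finish(), so skip it. */
static void sketch_unmap_nosync(struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, &attrs);
}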
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 0566e94a5a1d..b1838abb6d00 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -107,6 +107,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 	struct sg_table *sgt;
 	int ret;
 	int num_pages;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	if (WARN_ON(alloc_ctx == NULL))
 		return NULL;
@@ -140,9 +143,13 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 	buf->dev = get_device(conf->dev);
 
 	sgt = &buf->sg_table;
-	if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+			     buf->dma_dir, &attrs) == 0)
 		goto fail_map;
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 
 	buf->handler.refcount = &buf->refcount;
 	buf->handler.put = vb2_dma_sg_put;
@@ -175,9 +182,13 @@ static void vb2_dma_sg_put(void *buf_priv)
 	int i = buf->num_pages;
 
 	if (atomic_dec_and_test(&buf->refcount)) {
+		DEFINE_DMA_ATTRS(attrs);
+
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
-		dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+				   buf->dma_dir, &attrs);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
 		sg_free_table(buf->dma_sgt);
@@ -228,6 +239,9 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	int num_pages_from_user;
 	struct vm_area_struct *vma;
 	struct sg_table *sgt;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
@@ -296,9 +310,13 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 		goto userptr_fail_alloc_table_from_pages;
 
 	sgt = &buf->sg_table;
-	if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+			     buf->dma_dir, &attrs) == 0)
 		goto userptr_fail_map;
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 	return buf;
 
 userptr_fail_map:
@@ -327,10 +345,13 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 	struct vb2_dma_sg_buf *buf = buf_priv;
 	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 		__func__, buf->num_pages);
-	dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
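
Taken together, each buffer is now synced exactly once per direction per DMA
transaction, at the prepare()/finish() boundaries instead of at map/unmap
time. One full cycle using the sketch helpers above (again illustrative, not
part of the patch):

static int sketch_one_cycle(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = sketch_map_nosync(dev, sgt, DMA_FROM_DEVICE);	/* no sync */
	if (ret)
		return ret;

	sketch_prepare(dev, sgt, DMA_FROM_DEVICE);	/* sync for device */
	/* ... device DMAs into the buffer ... */
	sketch_finish(dev, sgt, DMA_FROM_DEVICE);	/* sync back for CPU */

	sketch_unmap_nosync(dev, sgt, DMA_FROM_DEVICE);	/* no second sync */
	return 0;
}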