author     Hans Verkuil <hans.verkuil@cisco.com>  2014-11-24 06:50:31 -0500
committer  Mauro Carvalho Chehab <mchehab@osg.samsung.com>  2014-11-25 06:01:16 -0500
commit     d790b7eda953df474f470169ebdf111c02fa7a2d (patch)
tree       31ad9dce666141bf0d83989717188fe8700ac310 /drivers/media/v4l2-core
parent     0c3a14c177aa85afb991e7c2be3921aa9a52a893 (diff)
[media] vb2-dma-sg: move dma_(un)map_sg here
This moves dma_(un)map_sg to the get_userptr/put_userptr and alloc/put
memops of videobuf2-dma-sg.c, and adds dma_sync_sg_for_device/cpu to the
prepare/finish memops.

Now that vb2-dma-sg syncs the buffers in the prepare/finish memops, the
drivers that use dma-sg can drop their own sync calls.

For the solo6x10 driver this was a bit more involved, because it needs to
copy JPEG or MPEG headers to the buffer before returning it to userspace,
and that cannot be done in the old place: there the buffer is still set up
for DMA access, not for CPU access. The buf_finish op is the ideal place
for this, since by the time buf_finish is called the buffer is available
for CPU access, so copying into it is safe.

[mchehab@osg.samsung.com: Fix a compilation breakage:
drivers/media/v4l2-core/videobuf2-dma-sg.c:150:19: error: 'struct vb2_dma_sg_buf' has no member named 'dma_sgt']

Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Acked-by: Pawel Osciak <pawel@osciak.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
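To make the buf_finish point concrete, here is a minimal sketch, not taken
from this patch: the foo_ names and the header blob are hypothetical stand-ins
for solo6x10's real ones. It shows a driver copying a stream header into the
buffer from its buf_finish op, at which point vb2-dma-sg has already synced
the buffer for CPU access:

#include <linux/types.h>
#include <linux/string.h>
#include <media/videobuf2-core.h>

/* Hypothetical pre-built JPEG header; stands in for the driver's real one. */
static const u8 foo_jpeg_header[0x200];

static void foo_buf_finish(struct vb2_buffer *vb)
{
        void *vaddr = vb2_plane_vaddr(vb, 0);

        /*
         * The finish memop has already done the device->CPU sync by the
         * time buf_finish runs, so writing into the buffer is safe here.
         */
        if (vaddr)
                memcpy(vaddr, foo_jpeg_header, sizeof(foo_jpeg_header));
}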
Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-sg.c | 41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 2bf13dc4df34..346e39b2aae8 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -96,6 +96,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 {
         struct vb2_dma_sg_conf *conf = alloc_ctx;
         struct vb2_dma_sg_buf *buf;
+        struct sg_table *sgt;
         int ret;
         int num_pages;
 
@@ -128,6 +129,12 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 
         /* Prevent the device from being released while the buffer is used */
         buf->dev = get_device(conf->dev);
+
+        sgt = &buf->sg_table;
+        if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
+                goto fail_map;
+        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+
         buf->handler.refcount = &buf->refcount;
         buf->handler.put = vb2_dma_sg_put;
         buf->handler.arg = buf;
@@ -138,6 +145,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
                 __func__, buf->num_pages);
         return buf;
 
+fail_map:
+        put_device(buf->dev);
+        sg_free_table(sgt);
 fail_table_alloc:
         num_pages = buf->num_pages;
         while (num_pages--)
@@ -152,11 +162,13 @@ fail_pages_array_alloc:
 static void vb2_dma_sg_put(void *buf_priv)
 {
         struct vb2_dma_sg_buf *buf = buf_priv;
+        struct sg_table *sgt = &buf->sg_table;
         int i = buf->num_pages;
 
         if (atomic_dec_and_test(&buf->refcount)) {
                 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                         buf->num_pages);
+                dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
                 if (buf->vaddr)
                         vm_unmap_ram(buf->vaddr, buf->num_pages);
                 sg_free_table(&buf->sg_table);
@@ -168,6 +180,22 @@ static void vb2_dma_sg_put(void *buf_priv)
         }
 }
 
+static void vb2_dma_sg_prepare(void *buf_priv)
+{
+        struct vb2_dma_sg_buf *buf = buf_priv;
+        struct sg_table *sgt = &buf->sg_table;
+
+        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dma_sg_finish(void *buf_priv)
+{
+        struct vb2_dma_sg_buf *buf = buf_priv;
+        struct sg_table *sgt = &buf->sg_table;
+
+        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
 static inline int vma_is_io(struct vm_area_struct *vma)
 {
         return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
@@ -177,16 +205,19 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                     unsigned long size,
                                     enum dma_data_direction dma_dir)
 {
+        struct vb2_dma_sg_conf *conf = alloc_ctx;
         struct vb2_dma_sg_buf *buf;
         unsigned long first, last;
         int num_pages_from_user;
         struct vm_area_struct *vma;
+        struct sg_table *sgt;
 
         buf = kzalloc(sizeof *buf, GFP_KERNEL);
         if (!buf)
                 return NULL;
 
         buf->vaddr = NULL;
+        buf->dev = conf->dev;
         buf->dma_dir = dma_dir;
         buf->offset = vaddr & ~PAGE_MASK;
         buf->size = size;
@@ -246,8 +277,14 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                         buf->num_pages, buf->offset, size, 0))
                 goto userptr_fail_alloc_table_from_pages;
 
+        sgt = &buf->sg_table;
+        if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
+                goto userptr_fail_map;
+        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
         return buf;
 
+userptr_fail_map:
+        sg_free_table(&buf->sg_table);
 userptr_fail_alloc_table_from_pages:
 userptr_fail_get_user_pages:
         dprintk(1, "get_user_pages requested/got: %d/%d]\n",
@@ -270,10 +307,12 @@ userptr_fail_alloc_pages:
 static void vb2_dma_sg_put_userptr(void *buf_priv)
 {
         struct vb2_dma_sg_buf *buf = buf_priv;
+        struct sg_table *sgt = &buf->sg_table;
         int i = buf->num_pages;
 
         dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
                 __func__, buf->num_pages);
+        dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
         if (buf->vaddr)
                 vm_unmap_ram(buf->vaddr, buf->num_pages);
         sg_free_table(&buf->sg_table);
@@ -360,6 +399,8 @@ const struct vb2_mem_ops vb2_dma_sg_memops = {
         .put = vb2_dma_sg_put,
         .get_userptr = vb2_dma_sg_get_userptr,
         .put_userptr = vb2_dma_sg_put_userptr,
+        .prepare = vb2_dma_sg_prepare,
+        .finish = vb2_dma_sg_finish,
         .vaddr = vb2_dma_sg_vaddr,
         .mmap = vb2_dma_sg_mmap,
         .num_users = vb2_dma_sg_num_users,
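
The driver-facing effect of the prepare/finish memops above: a driver using
vb2_dma_sg_memops that used to sync the scatterlist itself before queuing can
drop that call. A minimal sketch, assuming a hypothetical driver (the foo_
names are not from this patch) that previously did the sync in its own
buf_prepare:

#include <linux/dma-mapping.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-sg.h>

/* Hypothetical driver context; only the DMA device matters here. */
struct foo_dev {
        struct device *dev;
};

static int foo_buf_prepare(struct vb2_buffer *vb)
{
        struct foo_dev *foo = vb2_get_drv_priv(vb->vb2_queue);
        struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);

        /*
         * Redundant now: vb2_dma_sg_prepare() issues this CPU->device
         * sync for every buffer before it is handed to the hardware.
         */
        dma_sync_sg_for_device(foo->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
        return 0;
}

With this commit in place, the body above reduces to "return 0;", or the op
can be dropped entirely if the driver has no other prepare-time work.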