aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/media/v4l2-core/videobuf2-dma-contig.c
diff options
context:
space:
mode:
authorTomasz Stanislawski <t.stanislaws@samsung.com>2012-06-14 09:37:42 -0400
committerMauro Carvalho Chehab <mchehab@redhat.com>2012-11-25 14:12:03 -0500
commite15dab752d4c588544ccabdbe020a7cc092e23c8 (patch)
tree3d8c754781eed6607d12af4b92c0403d6a6ae58a /drivers/media/v4l2-core/videobuf2-dma-contig.c
parent40d8b7669ba352c30179c0deee8ce281fce99429 (diff)
[media] v4l: vb2-dma-contig: add support for scatterlist in userptr mode
This patch introduces usage of dma_map_sg to map memory behind a userspace pointer to a device as dma-contiguous mapping. This patch contains some of the code kindly provided by Marek Szyprowski <m.szyprowski@samsung.com> and Kamil Debski <k.debski@samsung.com> and Andrzej Pietrasiewicz <andrzej.p@samsung.com>. Kind thanks for bug reports from Laurent Pinchart <laurent.pinchart@ideasonboard.com> and Seung-Woo Kim <sw0312.kim@samsung.com>. Signed-off-by: Tomasz Stanislawski <t.stanislaws@samsung.com> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com> Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com> Acked-by: Hans Verkuil <hans.verkuil@cisco.com> Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Diffstat (limited to 'drivers/media/v4l2-core/videobuf2-dma-contig.c')
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c226
1 file changed, 210 insertions, 16 deletions
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index daac2b2de357..8486e06cb618 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -11,6 +11,8 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/scatterlist.h>
15#include <linux/sched.h>
14#include <linux/slab.h> 16#include <linux/slab.h>
15#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
16 18
@@ -27,6 +29,8 @@ struct vb2_dc_buf {
27 void *vaddr; 29 void *vaddr;
28 unsigned long size; 30 unsigned long size;
29 dma_addr_t dma_addr; 31 dma_addr_t dma_addr;
32 enum dma_data_direction dma_dir;
33 struct sg_table *dma_sgt;
30 34
31 /* MMAP related */ 35 /* MMAP related */
32 struct vb2_vmarea_handler handler; 36 struct vb2_vmarea_handler handler;
@@ -37,6 +41,44 @@ struct vb2_dc_buf {
37}; 41};
38 42
39/*********************************************/ 43/*********************************************/
44/* scatterlist table functions */
45/*********************************************/
46
47
48static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
49 void (*cb)(struct page *pg))
50{
51 struct scatterlist *s;
52 unsigned int i;
53
54 for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
55 struct page *page = sg_page(s);
56 unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
57 >> PAGE_SHIFT;
58 unsigned int j;
59
60 for (j = 0; j < n_pages; ++j, ++page)
61 cb(page);
62 }
63}
64
65static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
66{
67 struct scatterlist *s;
68 dma_addr_t expected = sg_dma_address(sgt->sgl);
69 unsigned int i;
70 unsigned long size = 0;
71
72 for_each_sg(sgt->sgl, s, sgt->nents, i) {
73 if (sg_dma_address(s) != expected)
74 break;
75 expected = sg_dma_address(s) + sg_dma_len(s);
76 size += sg_dma_len(s);
77 }
78 return size;
79}
80
81/*********************************************/
40/* callbacks for all buffers */ 82/* callbacks for all buffers */
41/*********************************************/ 83/*********************************************/
42 84
@@ -122,42 +164,194 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
122/* callbacks for USERPTR buffers */ 164/* callbacks for USERPTR buffers */
123/*********************************************/ 165/*********************************************/
124 166
167static inline int vma_is_io(struct vm_area_struct *vma)
168{
169 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
170}
171
172static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
173 int n_pages, struct vm_area_struct *vma, int write)
174{
175 if (vma_is_io(vma)) {
176 unsigned int i;
177
178 for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
179 unsigned long pfn;
180 int ret = follow_pfn(vma, start, &pfn);
181
182 if (ret) {
183 pr_err("no page for address %lu\n", start);
184 return ret;
185 }
186 pages[i] = pfn_to_page(pfn);
187 }
188 } else {
189 int n;
190
191 n = get_user_pages(current, current->mm, start & PAGE_MASK,
192 n_pages, write, 1, pages, NULL);
193 /* negative error means that no page was pinned */
194 n = max(n, 0);
195 if (n != n_pages) {
196 pr_err("got only %d of %d user pages\n", n, n_pages);
197 while (n)
198 put_page(pages[--n]);
199 return -EFAULT;
200 }
201 }
202
203 return 0;
204}
205
206static void vb2_dc_put_dirty_page(struct page *page)
207{
208 set_page_dirty_lock(page);
209 put_page(page);
210}
211
212static void vb2_dc_put_userptr(void *buf_priv)
213{
214 struct vb2_dc_buf *buf = buf_priv;
215 struct sg_table *sgt = buf->dma_sgt;
216
217 dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
218 if (!vma_is_io(buf->vma))
219 vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
220
221 sg_free_table(sgt);
222 kfree(sgt);
223 vb2_put_vma(buf->vma);
224 kfree(buf);
225}
226
125static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, 227static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
126 unsigned long size, int write) 228 unsigned long size, int write)
127{ 229{
230 struct vb2_dc_conf *conf = alloc_ctx;
128 struct vb2_dc_buf *buf; 231 struct vb2_dc_buf *buf;
232 unsigned long start;
233 unsigned long end;
234 unsigned long offset;
235 struct page **pages;
236 int n_pages;
237 int ret = 0;
129 struct vm_area_struct *vma; 238 struct vm_area_struct *vma;
130 dma_addr_t dma_addr = 0; 239 struct sg_table *sgt;
131 int ret; 240 unsigned long contig_size;
132 241
133 buf = kzalloc(sizeof *buf, GFP_KERNEL); 242 buf = kzalloc(sizeof *buf, GFP_KERNEL);
134 if (!buf) 243 if (!buf)
135 return ERR_PTR(-ENOMEM); 244 return ERR_PTR(-ENOMEM);
136 245
137 ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr); 246 buf->dev = conf->dev;
247 buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
248
249 start = vaddr & PAGE_MASK;
250 offset = vaddr & ~PAGE_MASK;
251 end = PAGE_ALIGN(vaddr + size);
252 n_pages = (end - start) >> PAGE_SHIFT;
253
254 pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
255 if (!pages) {
256 ret = -ENOMEM;
257 pr_err("failed to allocate pages table\n");
258 goto fail_buf;
259 }
260
261 /* current->mm->mmap_sem is taken by videobuf2 core */
262 vma = find_vma(current->mm, vaddr);
263 if (!vma) {
264 pr_err("no vma for address %lu\n", vaddr);
265 ret = -EFAULT;
266 goto fail_pages;
267 }
268
269 if (vma->vm_end < vaddr + size) {
270 pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
271 ret = -EFAULT;
272 goto fail_pages;
273 }
274
275 buf->vma = vb2_get_vma(vma);
276 if (!buf->vma) {
277 pr_err("failed to copy vma\n");
278 ret = -ENOMEM;
279 goto fail_pages;
280 }
281
282 /* extract page list from userspace mapping */
283 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
138 if (ret) { 284 if (ret) {
139 printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n", 285 pr_err("failed to get user pages\n");
140 vaddr); 286 goto fail_vma;
141 kfree(buf); 287 }
142 return ERR_PTR(ret); 288
289 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
290 if (!sgt) {
291 pr_err("failed to allocate sg table\n");
292 ret = -ENOMEM;
293 goto fail_get_user_pages;
294 }
295
296 ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
297 offset, size, GFP_KERNEL);
298 if (ret) {
299 pr_err("failed to initialize sg table\n");
300 goto fail_sgt;
301 }
302
303 /* pages are no longer needed */
304 kfree(pages);
305 pages = NULL;
306
307 sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
308 buf->dma_dir);
309 if (sgt->nents <= 0) {
310 pr_err("failed to map scatterlist\n");
311 ret = -EIO;
312 goto fail_sgt_init;
313 }
314
315 contig_size = vb2_dc_get_contiguous_size(sgt);
316 if (contig_size < size) {
317 pr_err("contiguous mapping is too small %lu/%lu\n",
318 contig_size, size);
319 ret = -EFAULT;
320 goto fail_map_sg;
143 } 321 }
144 322
323 buf->dma_addr = sg_dma_address(sgt->sgl);
145 buf->size = size; 324 buf->size = size;
146 buf->dma_addr = dma_addr; 325 buf->dma_sgt = sgt;
147 buf->vma = vma;
148 326
149 return buf; 327 return buf;
150}
151 328
152static void vb2_dc_put_userptr(void *mem_priv) 329fail_map_sg:
153{ 330 dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
154 struct vb2_dc_buf *buf = mem_priv;
155 331
156 if (!buf) 332fail_sgt_init:
157 return; 333 if (!vma_is_io(buf->vma))
334 vb2_dc_sgt_foreach_page(sgt, put_page);
335 sg_free_table(sgt);
336
337fail_sgt:
338 kfree(sgt);
158 339
340fail_get_user_pages:
341 if (pages && !vma_is_io(buf->vma))
342 while (n_pages)
343 put_page(pages[--n_pages]);
344
345fail_vma:
159 vb2_put_vma(buf->vma); 346 vb2_put_vma(buf->vma);
347
348fail_pages:
349 kfree(pages); /* kfree is NULL-proof */
350
351fail_buf:
160 kfree(buf); 352 kfree(buf);
353
354 return ERR_PTR(ret);
161} 355}
162 356
163/*********************************************/ 357/*********************************************/