Diffstat (limited to 'drivers/media/v4l2-core/videobuf2-dma-contig.c')

 -rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-contig.c | 700
 1 file changed, 645 insertions(+), 55 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 4b7132660a93..10beaee7f0ae 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -10,7 +10,10 @@
  * the Free Software Foundation.
  */
 
+#include <linux/dma-buf.h>
 #include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 
@@ -23,40 +26,158 @@ struct vb2_dc_conf {
 };
 
 struct vb2_dc_buf {
-        struct vb2_dc_conf *conf;
+        struct device *dev;
         void *vaddr;
-        dma_addr_t dma_addr;
         unsigned long size;
-        struct vm_area_struct *vma;
-        atomic_t refcount;
+        dma_addr_t dma_addr;
+        enum dma_data_direction dma_dir;
+        struct sg_table *dma_sgt;
+
+        /* MMAP related */
         struct vb2_vmarea_handler handler;
+        atomic_t refcount;
+        struct sg_table *sgt_base;
+
+        /* USERPTR related */
+        struct vm_area_struct *vma;
+
+        /* DMABUF related */
+        struct dma_buf_attachment *db_attach;
 };
 
-static void vb2_dma_contig_put(void *buf_priv);
+/*********************************************/
+/*        scatterlist table functions        */
+/*********************************************/
+
+
+static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
+        void (*cb)(struct page *pg))
+{
+        struct scatterlist *s;
+        unsigned int i;
+
+        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+                struct page *page = sg_page(s);
+                unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
+                        >> PAGE_SHIFT;
+                unsigned int j;
+
+                for (j = 0; j < n_pages; ++j, ++page)
+                        cb(page);
+        }
+}
+
+static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
+{
+        struct scatterlist *s;
+        dma_addr_t expected = sg_dma_address(sgt->sgl);
+        unsigned int i;
+        unsigned long size = 0;
+
+        for_each_sg(sgt->sgl, s, sgt->nents, i) {
+                if (sg_dma_address(s) != expected)
+                        break;
+                expected = sg_dma_address(s) + sg_dma_len(s);
+                size += sg_dma_len(s);
+        }
+        return size;
+}
+
+/*********************************************/
+/*         callbacks for all buffers         */
+/*********************************************/
+
+static void *vb2_dc_cookie(void *buf_priv)
+{
+        struct vb2_dc_buf *buf = buf_priv;
+
+        return &buf->dma_addr;
+}
+
+static void *vb2_dc_vaddr(void *buf_priv)
+{
+        struct vb2_dc_buf *buf = buf_priv;
+
+        return buf->vaddr;
+}
+
+static unsigned int vb2_dc_num_users(void *buf_priv)
+{
+        struct vb2_dc_buf *buf = buf_priv;
+
+        return atomic_read(&buf->refcount);
+}
+
+static void vb2_dc_prepare(void *buf_priv)
+{
+        struct vb2_dc_buf *buf = buf_priv;
+        struct sg_table *sgt = buf->dma_sgt;
+
+        /* DMABUF exporter will flush the cache for us */
+        if (!sgt || buf->db_attach)
+                return;
+
+        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dc_finish(void *buf_priv)
+{
+        struct vb2_dc_buf *buf = buf_priv;
+        struct sg_table *sgt = buf->dma_sgt;
+
+        /* DMABUF exporter will flush the cache for us */
+        if (!sgt || buf->db_attach)
+                return;
+
+        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+/*********************************************/
+/*        callbacks for MMAP buffers         */
+/*********************************************/
+
+static void vb2_dc_put(void *buf_priv)
+{
+        struct vb2_dc_buf *buf = buf_priv;
+
+        if (!atomic_dec_and_test(&buf->refcount))
+                return;
+
+        if (buf->sgt_base) {
+                sg_free_table(buf->sgt_base);
+                kfree(buf->sgt_base);
+        }
+        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+        put_device(buf->dev);
+        kfree(buf);
+}
 
-static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
+static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
 {
         struct vb2_dc_conf *conf = alloc_ctx;
+        struct device *dev = conf->dev;
         struct vb2_dc_buf *buf;
 
         buf = kzalloc(sizeof *buf, GFP_KERNEL);
         if (!buf)
                 return ERR_PTR(-ENOMEM);
 
-        buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->dma_addr,
-                        GFP_KERNEL);
+        /* align image size to PAGE_SIZE */
+        size = PAGE_ALIGN(size);
+
+        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
         if (!buf->vaddr) {
-                dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
-                        size);
+                dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
                 kfree(buf);
                 return ERR_PTR(-ENOMEM);
         }
 
-        buf->conf = conf;
+        /* Prevent the device from being released while the buffer is used */
+        buf->dev = get_device(dev);
         buf->size = size;
 
         buf->handler.refcount = &buf->refcount;
-        buf->handler.put = vb2_dma_contig_put;
+        buf->handler.put = vb2_dc_put;
         buf->handler.arg = buf;
 
         atomic_inc(&buf->refcount);
@@ -64,100 +185,569 @@ static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
         return buf;
 }
 
-static void vb2_dma_contig_put(void *buf_priv)
+static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
 {
         struct vb2_dc_buf *buf = buf_priv;
+        int ret;
 
-        if (atomic_dec_and_test(&buf->refcount)) {
-                dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
-                                buf->dma_addr);
-                kfree(buf);
+        if (!buf) {
+                printk(KERN_ERR "No buffer to map\n");
+                return -EINVAL;
+        }
+
+        /*
+         * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
+         * map whole buffer
+         */
+        vma->vm_pgoff = 0;
+
+        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
+                buf->dma_addr, buf->size);
+
+        if (ret) {
+                pr_err("Remapping memory failed, error: %d\n", ret);
+                return ret;
         }
+
+        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+        vma->vm_private_data = &buf->handler;
+        vma->vm_ops = &vb2_common_vm_ops;
+
+        vma->vm_ops->open(vma);
+
+        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
+                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
+                buf->size);
+
+        return 0;
 }
 
-static void *vb2_dma_contig_cookie(void *buf_priv)
+/*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_dc_attachment {
+        struct sg_table sgt;
+        enum dma_data_direction dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+        struct dma_buf_attachment *dbuf_attach)
 {
-        struct vb2_dc_buf *buf = buf_priv;
+        struct vb2_dc_attachment *attach;
+        unsigned int i;
+        struct scatterlist *rd, *wr;
+        struct sg_table *sgt;
+        struct vb2_dc_buf *buf = dbuf->priv;
+        int ret;
 
-        return &buf->dma_addr;
+        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+        if (!attach)
+                return -ENOMEM;
+
+        sgt = &attach->sgt;
+        /* Copy the buf->base_sgt scatter list to the attachment, as we can't
+         * map the same scatter list to multiple attachments at the same time.
+         */
+        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+        if (ret) {
+                kfree(attach);
+                return -ENOMEM;
+        }
+
+        rd = buf->sgt_base->sgl;
+        wr = sgt->sgl;
+        for (i = 0; i < sgt->orig_nents; ++i) {
+                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+                rd = sg_next(rd);
+                wr = sg_next(wr);
+        }
+
+        attach->dir = DMA_NONE;
+        dbuf_attach->priv = attach;
+
+        return 0;
 }
 
-static void *vb2_dma_contig_vaddr(void *buf_priv)
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+        struct dma_buf_attachment *db_attach)
 {
-        struct vb2_dc_buf *buf = buf_priv;
-        if (!buf)
-                return NULL;
+        struct vb2_dc_attachment *attach = db_attach->priv;
+        struct sg_table *sgt;
+
+        if (!attach)
+                return;
+
+        sgt = &attach->sgt;
+
+        /* release the scatterlist cache */
+        if (attach->dir != DMA_NONE)
+                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+                        attach->dir);
+        sg_free_table(sgt);
+        kfree(attach);
+        db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+        struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
+{
+        struct vb2_dc_attachment *attach = db_attach->priv;
+        /* stealing dmabuf mutex to serialize map/unmap operations */
+        struct mutex *lock = &db_attach->dmabuf->lock;
+        struct sg_table *sgt;
+        int ret;
+
+        mutex_lock(lock);
+
+        sgt = &attach->sgt;
+        /* return previously mapped sg table */
+        if (attach->dir == dir) {
+                mutex_unlock(lock);
+                return sgt;
+        }
+
+        /* release any previous cache */
+        if (attach->dir != DMA_NONE) {
+                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+                        attach->dir);
+                attach->dir = DMA_NONE;
+        }
+
+        /* mapping to the client with new direction */
+        ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
+        if (ret <= 0) {
+                pr_err("failed to map scatterlist\n");
+                mutex_unlock(lock);
+                return ERR_PTR(-EIO);
+        }
+
+        attach->dir = dir;
+
+        mutex_unlock(lock);
+
+        return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+        struct sg_table *sgt, enum dma_data_direction dir)
+{
+        /* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+        /* drop reference obtained in vb2_dc_get_dmabuf */
+        vb2_dc_put(dbuf->priv);
+}
+
+static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+        struct vb2_dc_buf *buf = dbuf->priv;
+
+        return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+        struct vb2_dc_buf *buf = dbuf->priv;
 
         return buf->vaddr;
 }
 
-static unsigned int vb2_dma_contig_num_users(void *buf_priv)
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+        struct vm_area_struct *vma)
 {
-        struct vb2_dc_buf *buf = buf_priv;
+        return vb2_dc_mmap(dbuf->priv, vma);
+}
 
-        return atomic_read(&buf->refcount);
+static struct dma_buf_ops vb2_dc_dmabuf_ops = {
+        .attach = vb2_dc_dmabuf_ops_attach,
+        .detach = vb2_dc_dmabuf_ops_detach,
+        .map_dma_buf = vb2_dc_dmabuf_ops_map,
+        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+        .kmap = vb2_dc_dmabuf_ops_kmap,
+        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+        .vmap = vb2_dc_dmabuf_ops_vmap,
+        .mmap = vb2_dc_dmabuf_ops_mmap,
+        .release = vb2_dc_dmabuf_ops_release,
+};
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+        int ret;
+        struct sg_table *sgt;
+
+        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+        if (!sgt) {
+                dev_err(buf->dev, "failed to alloc sg table\n");
+                return NULL;
+        }
+
+        ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
+                buf->size);
+        if (ret < 0) {
+                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+                kfree(sgt);
+                return NULL;
+        }
+
+        return sgt;
 }
 
-static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
 {
         struct vb2_dc_buf *buf = buf_priv;
+        struct dma_buf *dbuf;
 
-        if (!buf) {
-                printk(KERN_ERR "No buffer to map\n");
-                return -EINVAL;
+        if (!buf->sgt_base)
+                buf->sgt_base = vb2_dc_get_base_sgt(buf);
+
+        if (WARN_ON(!buf->sgt_base))
+                return NULL;
+
+        dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+        if (IS_ERR(dbuf))
+                return NULL;
+
+        /* dmabuf keeps reference to vb2 buffer */
+        atomic_inc(&buf->refcount);
+
+        return dbuf;
+}
+
+/*********************************************/
+/*       callbacks for USERPTR buffers       */
+/*********************************************/
+
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
+        int n_pages, struct vm_area_struct *vma, int write)
+{
+        if (vma_is_io(vma)) {
+                unsigned int i;
+
+                for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
+                        unsigned long pfn;
+                        int ret = follow_pfn(vma, start, &pfn);
+
+                        if (ret) {
+                                pr_err("no page for address %lu\n", start);
+                                return ret;
+                        }
+                        pages[i] = pfn_to_page(pfn);
+                }
+        } else {
+                int n;
+
+                n = get_user_pages(current, current->mm, start & PAGE_MASK,
+                        n_pages, write, 1, pages, NULL);
+                /* negative error means that no page was pinned */
+                n = max(n, 0);
+                if (n != n_pages) {
+                        pr_err("got only %d of %d user pages\n", n, n_pages);
+                        while (n)
+                                put_page(pages[--n]);
+                        return -EFAULT;
+                }
         }
 
-        return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
-                        &vb2_common_vm_ops, &buf->handler);
+        return 0;
 }
 
-static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
-                unsigned long size, int write)
+static void vb2_dc_put_dirty_page(struct page *page)
 {
+        set_page_dirty_lock(page);
+        put_page(page);
+}
+
+static void vb2_dc_put_userptr(void *buf_priv)
+{
+        struct vb2_dc_buf *buf = buf_priv;
+        struct sg_table *sgt = buf->dma_sgt;
+
+        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+        if (!vma_is_io(buf->vma))
+                vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+
+        sg_free_table(sgt);
+        kfree(sgt);
+        vb2_put_vma(buf->vma);
+        kfree(buf);
+}
+
+static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+        unsigned long size, int write)
+{
+        struct vb2_dc_conf *conf = alloc_ctx;
         struct vb2_dc_buf *buf;
+        unsigned long start;
+        unsigned long end;
+        unsigned long offset;
+        struct page **pages;
+        int n_pages;
+        int ret = 0;
         struct vm_area_struct *vma;
-        dma_addr_t dma_addr = 0;
-        int ret;
+        struct sg_table *sgt;
+        unsigned long contig_size;
+        unsigned long dma_align = dma_get_cache_alignment();
+
+        /* Only cache aligned DMA transfers are reliable */
+        if (!IS_ALIGNED(vaddr | size, dma_align)) {
+                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
+                return ERR_PTR(-EINVAL);
+        }
+
+        if (!size) {
+                pr_debug("size is zero\n");
+                return ERR_PTR(-EINVAL);
+        }
 
         buf = kzalloc(sizeof *buf, GFP_KERNEL);
         if (!buf)
                 return ERR_PTR(-ENOMEM);
 
-        ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
+        buf->dev = conf->dev;
+        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+        start = vaddr & PAGE_MASK;
+        offset = vaddr & ~PAGE_MASK;
+        end = PAGE_ALIGN(vaddr + size);
+        n_pages = (end - start) >> PAGE_SHIFT;
+
+        pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
+        if (!pages) {
+                ret = -ENOMEM;
+                pr_err("failed to allocate pages table\n");
+                goto fail_buf;
+        }
+
+        /* current->mm->mmap_sem is taken by videobuf2 core */
+        vma = find_vma(current->mm, vaddr);
+        if (!vma) {
+                pr_err("no vma for address %lu\n", vaddr);
+                ret = -EFAULT;
+                goto fail_pages;
+        }
+
+        if (vma->vm_end < vaddr + size) {
+                pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
+                ret = -EFAULT;
+                goto fail_pages;
+        }
+
+        buf->vma = vb2_get_vma(vma);
+        if (!buf->vma) {
+                pr_err("failed to copy vma\n");
+                ret = -ENOMEM;
+                goto fail_pages;
+        }
+
+        /* extract page list from userspace mapping */
+        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
         if (ret) {
-                printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
-                                vaddr);
-                kfree(buf);
-                return ERR_PTR(ret);
+                pr_err("failed to get user pages\n");
+                goto fail_vma;
+        }
+
+        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+        if (!sgt) {
+                pr_err("failed to allocate sg table\n");
+                ret = -ENOMEM;
+                goto fail_get_user_pages;
+        }
+
+        ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+                offset, size, GFP_KERNEL);
+        if (ret) {
+                pr_err("failed to initialize sg table\n");
+                goto fail_sgt;
         }
 
+        /* pages are no longer needed */
+        kfree(pages);
+        pages = NULL;
+
+        sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
+                buf->dma_dir);
+        if (sgt->nents <= 0) {
+                pr_err("failed to map scatterlist\n");
+                ret = -EIO;
+                goto fail_sgt_init;
+        }
+
+        contig_size = vb2_dc_get_contiguous_size(sgt);
+        if (contig_size < size) {
+                pr_err("contiguous mapping is too small %lu/%lu\n",
+                        contig_size, size);
+                ret = -EFAULT;
+                goto fail_map_sg;
+        }
+
+        buf->dma_addr = sg_dma_address(sgt->sgl);
         buf->size = size;
-        buf->dma_addr = dma_addr;
-        buf->vma = vma;
+        buf->dma_sgt = sgt;
 
         return buf;
+
+fail_map_sg:
+        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+
+fail_sgt_init:
+        if (!vma_is_io(buf->vma))
+                vb2_dc_sgt_foreach_page(sgt, put_page);
+        sg_free_table(sgt);
+
+fail_sgt:
+        kfree(sgt);
+
+fail_get_user_pages:
+        if (pages && !vma_is_io(buf->vma))
+                while (n_pages)
+                        put_page(pages[--n_pages]);
+
+fail_vma:
+        vb2_put_vma(buf->vma);
+
+fail_pages:
+        kfree(pages); /* kfree is NULL-proof */
+
+fail_buf:
+        kfree(buf);
+
+        return ERR_PTR(ret);
 }
 
-static void vb2_dma_contig_put_userptr(void *mem_priv)
+/*********************************************/
+/*       callbacks for DMABUF buffers        */
+/*********************************************/
+
+static int vb2_dc_map_dmabuf(void *mem_priv)
 {
         struct vb2_dc_buf *buf = mem_priv;
+        struct sg_table *sgt;
+        unsigned long contig_size;
 
-        if (!buf)
+        if (WARN_ON(!buf->db_attach)) {
+                pr_err("trying to pin a non attached buffer\n");
+                return -EINVAL;
+        }
+
+        if (WARN_ON(buf->dma_sgt)) {
+                pr_err("dmabuf buffer is already pinned\n");
+                return 0;
+        }
+
+        /* get the associated scatterlist for this buffer */
+        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+        if (IS_ERR_OR_NULL(sgt)) {
+                pr_err("Error getting dmabuf scatterlist\n");
+                return -EINVAL;
+        }
+
+        /* checking if dmabuf is big enough to store contiguous chunk */
+        contig_size = vb2_dc_get_contiguous_size(sgt);
+        if (contig_size < buf->size) {
+                pr_err("contiguous chunk is too small %lu/%lu b\n",
+                        contig_size, buf->size);
+                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+                return -EFAULT;
+        }
+
+        buf->dma_addr = sg_dma_address(sgt->sgl);
+        buf->dma_sgt = sgt;
+
+        return 0;
+}
+
+static void vb2_dc_unmap_dmabuf(void *mem_priv)
+{
+        struct vb2_dc_buf *buf = mem_priv;
+        struct sg_table *sgt = buf->dma_sgt;
+
+        if (WARN_ON(!buf->db_attach)) {
+                pr_err("trying to unpin a not attached buffer\n");
                 return;
+        }
 
-        vb2_put_vma(buf->vma);
+        if (WARN_ON(!sgt)) {
+                pr_err("dmabuf buffer is already unpinned\n");
+                return;
+        }
+
+        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+        buf->dma_addr = 0;
+        buf->dma_sgt = NULL;
+}
+
+static void vb2_dc_detach_dmabuf(void *mem_priv)
+{
+        struct vb2_dc_buf *buf = mem_priv;
+
+        /* if vb2 works correctly you should never detach mapped buffer */
+        if (WARN_ON(buf->dma_addr))
+                vb2_dc_unmap_dmabuf(buf);
+
+        /* detach this attachment */
+        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
         kfree(buf);
 }
 
+static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+        unsigned long size, int write)
+{
+        struct vb2_dc_conf *conf = alloc_ctx;
+        struct vb2_dc_buf *buf;
+        struct dma_buf_attachment *dba;
+
+        if (dbuf->size < size)
+                return ERR_PTR(-EFAULT);
+
+        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+        if (!buf)
+                return ERR_PTR(-ENOMEM);
+
+        buf->dev = conf->dev;
+        /* create attachment for the dmabuf with the user device */
+        dba = dma_buf_attach(dbuf, buf->dev);
+        if (IS_ERR(dba)) {
+                pr_err("failed to attach dmabuf\n");
+                kfree(buf);
+                return dba;
+        }
+
+        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+        buf->size = size;
+        buf->db_attach = dba;
+
+        return buf;
+}
+
+/*********************************************/
+/*       DMA CONTIG exported functions       */
+/*********************************************/
+
 const struct vb2_mem_ops vb2_dma_contig_memops = {
-        .alloc = vb2_dma_contig_alloc,
-        .put = vb2_dma_contig_put,
-        .cookie = vb2_dma_contig_cookie,
-        .vaddr = vb2_dma_contig_vaddr,
-        .mmap = vb2_dma_contig_mmap,
-        .get_userptr = vb2_dma_contig_get_userptr,
-        .put_userptr = vb2_dma_contig_put_userptr,
-        .num_users = vb2_dma_contig_num_users,
+        .alloc = vb2_dc_alloc,
+        .put = vb2_dc_put,
+        .get_dmabuf = vb2_dc_get_dmabuf,
+        .cookie = vb2_dc_cookie,
+        .vaddr = vb2_dc_vaddr,
+        .mmap = vb2_dc_mmap,
+        .get_userptr = vb2_dc_get_userptr,
+        .put_userptr = vb2_dc_put_userptr,
+        .prepare = vb2_dc_prepare,
+        .finish = vb2_dc_finish,
+        .map_dmabuf = vb2_dc_map_dmabuf,
+        .unmap_dmabuf = vb2_dc_unmap_dmabuf,
+        .attach_dmabuf = vb2_dc_attach_dmabuf,
+        .detach_dmabuf = vb2_dc_detach_dmabuf,
+        .num_users = vb2_dc_num_users,
 };
 EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
 
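For context, a minimal sketch of how a capture driver of this kernel generation would plug vb2_dma_contig_memops into its videobuf2 queue once the DMABUF support above is in place. It is not part of the commit; struct my_dev, my_driver_vb2_ops and my_driver_init_vb2() are hypothetical driver-side names, while vb2_dma_contig_init_ctx(), vb2_queue_init() and the VB2_DMABUF io_modes flag are the videobuf2 API of this era.

```c
/*
 * Illustrative only -- not part of this diff. "my_dev", "my_driver_vb2_ops"
 * and "my_driver_init_vb2" are hypothetical driver-side names.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

struct my_dev {
        struct platform_device *pdev;   /* provides the struct device used for DMA */
        struct vb2_queue queue;
        void *alloc_ctx;                /* vb2-dma-contig allocation context */
};

extern const struct vb2_ops my_driver_vb2_ops;  /* queue_setup, buf_queue, ... */

static int my_driver_init_vb2(struct my_dev *mdev)
{
        struct vb2_queue *q = &mdev->queue;

        /* the allocation context carries the struct device used by vb2_dc_alloc() */
        mdev->alloc_ctx = vb2_dma_contig_init_ctx(&mdev->pdev->dev);
        if (IS_ERR(mdev->alloc_ctx))
                return PTR_ERR(mdev->alloc_ctx);

        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        /* DMABUF joins MMAP/USERPTR now that the memops support it */
        q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        q->drv_priv = mdev;
        q->ops = &my_driver_vb2_ops;
        q->mem_ops = &vb2_dma_contig_memops;
        q->buf_struct_size = sizeof(struct vb2_buffer);

        return vb2_queue_init(q);
        /* on remove: vb2_queue_release(q); vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx); */
}
```

In a real driver the alloc_ctx is handed back to vb2 from the queue_setup() callback through its alloc_ctxs[] array, which is how vb2_dc_alloc() above learns which struct device to allocate against.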