Diffstat (limited to 'drivers/media/video/videobuf-dma-contig.c')
-rw-r--r--  drivers/media/video/videobuf-dma-contig.c | 417
1 file changed, 417 insertions, 0 deletions
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
new file mode 100644
index 000000000000..2475535cbe7f
--- /dev/null
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -0,0 +1,417 @@
/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
	u32 magic;
	void *vaddr;
	dma_addr_t dma_handle;
	unsigned long size;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should) \
	if (unlikely((is) != (should))) { \
		pr_err("magic mismatch: %x expected %x\n", is, should); \
		BUG(); \
	}

static void
videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}

static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
		mutex_lock(&q->vb_lock);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback runs only when the kernel
				   allocated the memory and it was mmapped;
				   in that case the memory must be freed here
				   so that the mapping can be torn down.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* dma_free_coherent() is not atomic, so it
				   must not be called with IRQs disabled.
				 */
				dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				dma_free_coherent(q->dev, mem->size,
						  mem->vaddr, mem->dma_handle);
				mem->vaddr = NULL;
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		mutex_unlock(&q->vb_lock);
	}
}

static struct vm_operations_struct videobuf_vm_ops = {
	.open	= videobuf_vm_open,
	.close	= videobuf_vm_close,
};

static void *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (vb) {
		mem = vb->priv = ((char *)vb) + size;
		mem->magic = MAGIC_DC_MEM;
	}

	return vb;
}

static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}

static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not allocated/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* The only USERPTR currently supported is the one needed for
		   read() method.
		 */
		if (vb->baddr)
			return -EINVAL;

		mem->size = PAGE_ALIGN(vb->size);
		mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
						&mem->dma_handle, GFP_KERNEL);
		if (!mem->vaddr) {
			dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
				mem->size);
			return -ENOMEM;
		}

		dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
			mem->vaddr, mem->size);
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
				DMA_FROM_DEVICE);
	return 0;
}

static int __videobuf_mmap_free(struct videobuf_queue *q)
{
	unsigned int i;

	dev_dbg(q->dev, "%s\n", __func__);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (q->bufs[i] && q->bufs[i]->map)
			return -EBUSY;
	}

	return 0;
}

static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	unsigned int first;
	int retval;
	unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;

	dev_dbg(q->dev, "%s\n", __func__);
	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (!q->bufs[first])
			continue;

		if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
			continue;
		if (q->bufs[first]->boff == offset)
			break;
	}
	if (VIDEO_MAX_FRAME == first) {
		dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
			offset);
		return -EINVAL;
	}

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	q->bufs[first]->map = map;
	map->start = vma->vm_start;
	map->end = vma->vm_end;
	map->q = q;

	q->bufs[first]->baddr = vma->vm_start;

	mem = q->bufs[first]->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
	mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
					&mem->dma_handle, GFP_KERNEL);
	if (!mem->vaddr) {
		dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
			mem->size);
		goto error;
	}
	dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
		mem->vaddr, mem->size);

	/* Try to remap memory */

	size = vma->vm_end - vma->vm_start;
	size = (size < mem->size) ? size : mem->size;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	retval = remap_pfn_range(vma, vma->vm_start,
				 mem->dma_handle >> PAGE_SHIFT,
				 size, vma->vm_page_prot);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
		dma_free_coherent(q->dev, mem->size,
				  mem->vaddr, mem->dma_handle);
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int) q->bufs[first]->bsize,
		vma->vm_pgoff, first);

	videobuf_vm_open(vma);

	return 0;

error:
	kfree(map);
	return -ENOMEM;
}

static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   char __user *data, size_t count,
				   int nonblocking)
{
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
	void *vaddr;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
	BUG_ON(!mem->vaddr);

	/* copy to userspace */
	if (count > q->read_buf->size - q->read_off)
		count = q->read_buf->size - q->read_off;

	vaddr = mem->vaddr;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}

static int __videobuf_copy_stream(struct videobuf_queue *q,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc;
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc = (unsigned int *)mem->vaddr;
		fc += (q->read_buf->size >> 2) - 1;
		*fc = q->read_buf->field_count >> 1;
		dev_dbg(q->dev, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}

static struct videobuf_qtype_ops qops = {
	.magic		= MAGIC_QTYPE_OPS,

	.alloc		= __videobuf_alloc,
	.iolock		= __videobuf_iolock,
	.sync		= __videobuf_sync,
	.mmap_free	= __videobuf_mmap_free,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.video_copy_to_user = __videobuf_copy_to_user,
	.copy_stream	= __videobuf_copy_stream,
	.vmalloc	= __videobuf_to_vmalloc,
};

void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);

dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);

void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* Memory that was mmapped must not be freed here, otherwise the
	   mapped region would go away while user space still needs it;
	   in that case the release happens in videobuf_vm_close().
	   So only free memory that was allocated for the read() method,
	   i.e. a USERPTR buffer without a user-supplied address.
	 */
	if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
	mem->vaddr = NULL;
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
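
For context, a minimal sketch of how a capture driver might wire these helpers
into its videobuf queue. It is not part of this patch and is illustrative only:
the "mycam" device structure, MYCAM_FRAME_SIZE and mycam_hw_set_dma_addr() are
hypothetical stand-ins; only videobuf_queue_dma_contig_init(),
videobuf_to_dma_contig(), videobuf_dma_contig_free() and the generic videobuf
queue callbacks come from the videobuf API.

/* Illustrative driver glue; assumed names are flagged in comments. */
static int mycam_buf_setup(struct videobuf_queue *q,
			   unsigned int *count, unsigned int *size)
{
	*size = MYCAM_FRAME_SIZE;	/* hypothetical frame size */
	if (*count < 2)
		*count = 2;
	return 0;
}

static int mycam_buf_prepare(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     enum v4l2_field field)
{
	int rc;

	vb->size = MYCAM_FRAME_SIZE;
	vb->field = field;

	if (vb->state == VIDEOBUF_NEEDS_INIT) {
		/* ends up in __videobuf_iolock() above for MMAP/read() */
		rc = videobuf_iolock(q, vb, NULL);
		if (rc < 0)
			return rc;
	}
	vb->state = VIDEOBUF_PREPARED;
	return 0;
}

static void mycam_buf_queue(struct videobuf_queue *q,
			    struct videobuf_buffer *vb)
{
	/* the point of dma-contig: one physical address per buffer */
	dma_addr_t addr = videobuf_to_dma_contig(vb);

	mycam_hw_set_dma_addr(addr);	/* hypothetical hardware helper */
	vb->state = VIDEOBUF_QUEUED;
}

static void mycam_buf_release(struct videobuf_queue *q,
			      struct videobuf_buffer *vb)
{
	videobuf_dma_contig_free(q, vb);
	vb->state = VIDEOBUF_NEEDS_INIT;
}

static struct videobuf_queue_ops mycam_qops = {
	.buf_setup	= mycam_buf_setup,
	.buf_prepare	= mycam_buf_prepare,
	.buf_queue	= mycam_buf_queue,
	.buf_release	= mycam_buf_release,
};

/* at open() time, on the hypothetical "mycam" device: */
videobuf_queue_dma_contig_init(&mycam->vb_q, &mycam_qops, &pdev->dev,
			       &mycam->irqlock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
			       V4L2_FIELD_NONE, sizeof(struct videobuf_buffer),
			       mycam);

Note that buffers mmapped by user space are still released through
videobuf_vm_close() above; videobuf_dma_contig_free() in buf_release only
covers memory allocated for the read() path.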