diff options
author | Hans Verkuil <hans.verkuil@cisco.com> | 2014-11-18 07:50:58 -0500 |
---|---|---|
committer | Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 2014-11-25 05:50:28 -0500 |
commit | cd474037c4a9a9c15cab46ff26ceeed1bbda6abb (patch) | |
tree | 17ce0741bc7879d60ca83c116afdf73254cb35e4 /drivers/media/v4l2-core | |
parent | cf227429c74778cc9f8caf734d1f161f8f021915 (diff) |
[media] vb2: replace 'write' by 'dma_dir'
The 'write' argument is very ambiguous. I first assumed that if it is 1,
then we're doing video output, but instead it means the reverse.
Since it is used to set up the dma_dir value anyway, it is now replaced by
the correct dma_dir value, which is unambiguous.
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Acked-by: Pawel Osciak <pawel@osciak.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-core.c | 10 | ||||
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-dma-contig.c | 40 | ||||
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-dma-sg.c | 13 | ||||
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-vmalloc.c | 16 |
4 files changed, 43 insertions, 36 deletions
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index f2e43de3dd87..573f6fb9b40f 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
@@ -1358,7 +1358,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) | |||
1358 | void *mem_priv; | 1358 | void *mem_priv; |
1359 | unsigned int plane; | 1359 | unsigned int plane; |
1360 | int ret; | 1360 | int ret; |
1361 | int write = !V4L2_TYPE_IS_OUTPUT(q->type); | 1361 | enum dma_data_direction dma_dir = |
1362 | V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; | ||
1362 | bool reacquired = vb->planes[0].mem_priv == NULL; | 1363 | bool reacquired = vb->planes[0].mem_priv == NULL; |
1363 | 1364 | ||
1364 | memset(planes, 0, sizeof(planes[0]) * vb->num_planes); | 1365 | memset(planes, 0, sizeof(planes[0]) * vb->num_planes); |
@@ -1400,7 +1401,7 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) | |||
1400 | /* Acquire each plane's memory */ | 1401 | /* Acquire each plane's memory */ |
1401 | mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane], | 1402 | mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane], |
1402 | planes[plane].m.userptr, | 1403 | planes[plane].m.userptr, |
1403 | planes[plane].length, write); | 1404 | planes[plane].length, dma_dir); |
1404 | if (IS_ERR_OR_NULL(mem_priv)) { | 1405 | if (IS_ERR_OR_NULL(mem_priv)) { |
1405 | dprintk(1, "failed acquiring userspace " | 1406 | dprintk(1, "failed acquiring userspace " |
1406 | "memory for plane %d\n", plane); | 1407 | "memory for plane %d\n", plane); |
@@ -1461,7 +1462,8 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) | |||
1461 | void *mem_priv; | 1462 | void *mem_priv; |
1462 | unsigned int plane; | 1463 | unsigned int plane; |
1463 | int ret; | 1464 | int ret; |
1464 | int write = !V4L2_TYPE_IS_OUTPUT(q->type); | 1465 | enum dma_data_direction dma_dir = |
1466 | V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; | ||
1465 | bool reacquired = vb->planes[0].mem_priv == NULL; | 1467 | bool reacquired = vb->planes[0].mem_priv == NULL; |
1466 | 1468 | ||
1467 | memset(planes, 0, sizeof(planes[0]) * vb->num_planes); | 1469 | memset(planes, 0, sizeof(planes[0]) * vb->num_planes); |
@@ -1509,7 +1511,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) | |||
1509 | 1511 | ||
1510 | /* Acquire each plane's memory */ | 1512 | /* Acquire each plane's memory */ |
1511 | mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane], | 1513 | mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane], |
1512 | dbuf, planes[plane].length, write); | 1514 | dbuf, planes[plane].length, dma_dir); |
1513 | if (IS_ERR(mem_priv)) { | 1515 | if (IS_ERR(mem_priv)) { |
1514 | dprintk(1, "failed to attach dmabuf\n"); | 1516 | dprintk(1, "failed to attach dmabuf\n"); |
1515 | ret = PTR_ERR(mem_priv); | 1517 | ret = PTR_ERR(mem_priv); |
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 4a02ade14b4f..2bdffd383572 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c | |||
@@ -229,7 +229,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) | |||
229 | 229 | ||
230 | struct vb2_dc_attachment { | 230 | struct vb2_dc_attachment { |
231 | struct sg_table sgt; | 231 | struct sg_table sgt; |
232 | enum dma_data_direction dir; | 232 | enum dma_data_direction dma_dir; |
233 | }; | 233 | }; |
234 | 234 | ||
235 | static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, | 235 | static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, |
@@ -264,7 +264,7 @@ static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, | |||
264 | wr = sg_next(wr); | 264 | wr = sg_next(wr); |
265 | } | 265 | } |
266 | 266 | ||
267 | attach->dir = DMA_NONE; | 267 | attach->dma_dir = DMA_NONE; |
268 | dbuf_attach->priv = attach; | 268 | dbuf_attach->priv = attach; |
269 | 269 | ||
270 | return 0; | 270 | return 0; |
@@ -282,16 +282,16 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf, | |||
282 | sgt = &attach->sgt; | 282 | sgt = &attach->sgt; |
283 | 283 | ||
284 | /* release the scatterlist cache */ | 284 | /* release the scatterlist cache */ |
285 | if (attach->dir != DMA_NONE) | 285 | if (attach->dma_dir != DMA_NONE) |
286 | dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, | 286 | dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, |
287 | attach->dir); | 287 | attach->dma_dir); |
288 | sg_free_table(sgt); | 288 | sg_free_table(sgt); |
289 | kfree(attach); | 289 | kfree(attach); |
290 | db_attach->priv = NULL; | 290 | db_attach->priv = NULL; |
291 | } | 291 | } |
292 | 292 | ||
293 | static struct sg_table *vb2_dc_dmabuf_ops_map( | 293 | static struct sg_table *vb2_dc_dmabuf_ops_map( |
294 | struct dma_buf_attachment *db_attach, enum dma_data_direction dir) | 294 | struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir) |
295 | { | 295 | { |
296 | struct vb2_dc_attachment *attach = db_attach->priv; | 296 | struct vb2_dc_attachment *attach = db_attach->priv; |
297 | /* stealing dmabuf mutex to serialize map/unmap operations */ | 297 | /* stealing dmabuf mutex to serialize map/unmap operations */ |
@@ -303,27 +303,27 @@ static struct sg_table *vb2_dc_dmabuf_ops_map( | |||
303 | 303 | ||
304 | sgt = &attach->sgt; | 304 | sgt = &attach->sgt; |
305 | /* return previously mapped sg table */ | 305 | /* return previously mapped sg table */ |
306 | if (attach->dir == dir) { | 306 | if (attach->dma_dir == dma_dir) { |
307 | mutex_unlock(lock); | 307 | mutex_unlock(lock); |
308 | return sgt; | 308 | return sgt; |
309 | } | 309 | } |
310 | 310 | ||
311 | /* release any previous cache */ | 311 | /* release any previous cache */ |
312 | if (attach->dir != DMA_NONE) { | 312 | if (attach->dma_dir != DMA_NONE) { |
313 | dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, | 313 | dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, |
314 | attach->dir); | 314 | attach->dma_dir); |
315 | attach->dir = DMA_NONE; | 315 | attach->dma_dir = DMA_NONE; |
316 | } | 316 | } |
317 | 317 | ||
318 | /* mapping to the client with new direction */ | 318 | /* mapping to the client with new direction */ |
319 | ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir); | 319 | ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir); |
320 | if (ret <= 0) { | 320 | if (ret <= 0) { |
321 | pr_err("failed to map scatterlist\n"); | 321 | pr_err("failed to map scatterlist\n"); |
322 | mutex_unlock(lock); | 322 | mutex_unlock(lock); |
323 | return ERR_PTR(-EIO); | 323 | return ERR_PTR(-EIO); |
324 | } | 324 | } |
325 | 325 | ||
326 | attach->dir = dir; | 326 | attach->dma_dir = dma_dir; |
327 | 327 | ||
328 | mutex_unlock(lock); | 328 | mutex_unlock(lock); |
329 | 329 | ||
@@ -331,7 +331,7 @@ static struct sg_table *vb2_dc_dmabuf_ops_map( | |||
331 | } | 331 | } |
332 | 332 | ||
333 | static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, | 333 | static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, |
334 | struct sg_table *sgt, enum dma_data_direction dir) | 334 | struct sg_table *sgt, enum dma_data_direction dma_dir) |
335 | { | 335 | { |
336 | /* nothing to be done here */ | 336 | /* nothing to be done here */ |
337 | } | 337 | } |
@@ -460,7 +460,8 @@ static int vb2_dc_get_user_pfn(unsigned long start, int n_pages, | |||
460 | } | 460 | } |
461 | 461 | ||
462 | static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, | 462 | static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, |
463 | int n_pages, struct vm_area_struct *vma, int write) | 463 | int n_pages, struct vm_area_struct *vma, |
464 | enum dma_data_direction dma_dir) | ||
464 | { | 465 | { |
465 | if (vma_is_io(vma)) { | 466 | if (vma_is_io(vma)) { |
466 | unsigned int i; | 467 | unsigned int i; |
@@ -482,7 +483,7 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, | |||
482 | int n; | 483 | int n; |
483 | 484 | ||
484 | n = get_user_pages(current, current->mm, start & PAGE_MASK, | 485 | n = get_user_pages(current, current->mm, start & PAGE_MASK, |
485 | n_pages, write, 1, pages, NULL); | 486 | n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL); |
486 | /* negative error means that no page was pinned */ | 487 | /* negative error means that no page was pinned */ |
487 | n = max(n, 0); | 488 | n = max(n, 0); |
488 | if (n != n_pages) { | 489 | if (n != n_pages) { |
@@ -551,7 +552,7 @@ static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn | |||
551 | #endif | 552 | #endif |
552 | 553 | ||
553 | static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, | 554 | static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, |
554 | unsigned long size, int write) | 555 | unsigned long size, enum dma_data_direction dma_dir) |
555 | { | 556 | { |
556 | struct vb2_dc_conf *conf = alloc_ctx; | 557 | struct vb2_dc_conf *conf = alloc_ctx; |
557 | struct vb2_dc_buf *buf; | 558 | struct vb2_dc_buf *buf; |
@@ -582,7 +583,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
582 | return ERR_PTR(-ENOMEM); | 583 | return ERR_PTR(-ENOMEM); |
583 | 584 | ||
584 | buf->dev = conf->dev; | 585 | buf->dev = conf->dev; |
585 | buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | 586 | buf->dma_dir = dma_dir; |
586 | 587 | ||
587 | start = vaddr & PAGE_MASK; | 588 | start = vaddr & PAGE_MASK; |
588 | offset = vaddr & ~PAGE_MASK; | 589 | offset = vaddr & ~PAGE_MASK; |
@@ -618,7 +619,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
618 | } | 619 | } |
619 | 620 | ||
620 | /* extract page list from userspace mapping */ | 621 | /* extract page list from userspace mapping */ |
621 | ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write); | 622 | ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, |
623 | dma_dir == DMA_FROM_DEVICE); | ||
622 | if (ret) { | 624 | if (ret) { |
623 | unsigned long pfn; | 625 | unsigned long pfn; |
624 | if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { | 626 | if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { |
@@ -782,7 +784,7 @@ static void vb2_dc_detach_dmabuf(void *mem_priv) | |||
782 | } | 784 | } |
783 | 785 | ||
784 | static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, | 786 | static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, |
785 | unsigned long size, int write) | 787 | unsigned long size, enum dma_data_direction dma_dir) |
786 | { | 788 | { |
787 | struct vb2_dc_conf *conf = alloc_ctx; | 789 | struct vb2_dc_conf *conf = alloc_ctx; |
788 | struct vb2_dc_buf *buf; | 790 | struct vb2_dc_buf *buf; |
@@ -804,7 +806,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, | |||
804 | return dba; | 806 | return dba; |
805 | } | 807 | } |
806 | 808 | ||
807 | buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | 809 | buf->dma_dir = dma_dir; |
808 | buf->size = size; | 810 | buf->size = size; |
809 | buf->db_attach = dba; | 811 | buf->db_attach = dba; |
810 | 812 | ||
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 9b163a440f89..6b54a14ee827 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c | |||
@@ -33,8 +33,8 @@ module_param(debug, int, 0644); | |||
33 | struct vb2_dma_sg_buf { | 33 | struct vb2_dma_sg_buf { |
34 | void *vaddr; | 34 | void *vaddr; |
35 | struct page **pages; | 35 | struct page **pages; |
36 | int write; | ||
37 | int offset; | 36 | int offset; |
37 | enum dma_data_direction dma_dir; | ||
38 | struct sg_table sg_table; | 38 | struct sg_table sg_table; |
39 | size_t size; | 39 | size_t size; |
40 | unsigned int num_pages; | 40 | unsigned int num_pages; |
@@ -97,7 +97,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla | |||
97 | return NULL; | 97 | return NULL; |
98 | 98 | ||
99 | buf->vaddr = NULL; | 99 | buf->vaddr = NULL; |
100 | buf->write = 0; | 100 | buf->dma_dir = DMA_NONE; |
101 | buf->offset = 0; | 101 | buf->offset = 0; |
102 | buf->size = size; | 102 | buf->size = size; |
103 | /* size is already page aligned */ | 103 | /* size is already page aligned */ |
@@ -162,7 +162,8 @@ static inline int vma_is_io(struct vm_area_struct *vma) | |||
162 | } | 162 | } |
163 | 163 | ||
164 | static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, | 164 | static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, |
165 | unsigned long size, int write) | 165 | unsigned long size, |
166 | enum dma_data_direction dma_dir) | ||
166 | { | 167 | { |
167 | struct vb2_dma_sg_buf *buf; | 168 | struct vb2_dma_sg_buf *buf; |
168 | unsigned long first, last; | 169 | unsigned long first, last; |
@@ -174,7 +175,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
174 | return NULL; | 175 | return NULL; |
175 | 176 | ||
176 | buf->vaddr = NULL; | 177 | buf->vaddr = NULL; |
177 | buf->write = write; | 178 | buf->dma_dir = dma_dir; |
178 | buf->offset = vaddr & ~PAGE_MASK; | 179 | buf->offset = vaddr & ~PAGE_MASK; |
179 | buf->size = size; | 180 | buf->size = size; |
180 | 181 | ||
@@ -221,7 +222,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
221 | num_pages_from_user = get_user_pages(current, current->mm, | 222 | num_pages_from_user = get_user_pages(current, current->mm, |
222 | vaddr & PAGE_MASK, | 223 | vaddr & PAGE_MASK, |
223 | buf->num_pages, | 224 | buf->num_pages, |
224 | write, | 225 | buf->dma_dir == DMA_FROM_DEVICE, |
225 | 1, /* force */ | 226 | 1, /* force */ |
226 | buf->pages, | 227 | buf->pages, |
227 | NULL); | 228 | NULL); |
@@ -265,7 +266,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv) | |||
265 | vm_unmap_ram(buf->vaddr, buf->num_pages); | 266 | vm_unmap_ram(buf->vaddr, buf->num_pages); |
266 | sg_free_table(&buf->sg_table); | 267 | sg_free_table(&buf->sg_table); |
267 | while (--i >= 0) { | 268 | while (--i >= 0) { |
268 | if (buf->write) | 269 | if (buf->dma_dir == DMA_FROM_DEVICE) |
269 | set_page_dirty_lock(buf->pages[i]); | 270 | set_page_dirty_lock(buf->pages[i]); |
270 | if (!vma_is_io(buf->vma)) | 271 | if (!vma_is_io(buf->vma)) |
271 | put_page(buf->pages[i]); | 272 | put_page(buf->pages[i]); |
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c index 313d9771b2bc..fc1eb45a6143 100644 --- a/drivers/media/v4l2-core/videobuf2-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c | |||
@@ -25,7 +25,7 @@ struct vb2_vmalloc_buf { | |||
25 | void *vaddr; | 25 | void *vaddr; |
26 | struct page **pages; | 26 | struct page **pages; |
27 | struct vm_area_struct *vma; | 27 | struct vm_area_struct *vma; |
28 | int write; | 28 | enum dma_data_direction dma_dir; |
29 | unsigned long size; | 29 | unsigned long size; |
30 | unsigned int n_pages; | 30 | unsigned int n_pages; |
31 | atomic_t refcount; | 31 | atomic_t refcount; |
@@ -70,7 +70,8 @@ static void vb2_vmalloc_put(void *buf_priv) | |||
70 | } | 70 | } |
71 | 71 | ||
72 | static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, | 72 | static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, |
73 | unsigned long size, int write) | 73 | unsigned long size, |
74 | enum dma_data_direction dma_dir) | ||
74 | { | 75 | { |
75 | struct vb2_vmalloc_buf *buf; | 76 | struct vb2_vmalloc_buf *buf; |
76 | unsigned long first, last; | 77 | unsigned long first, last; |
@@ -82,7 +83,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
82 | if (!buf) | 83 | if (!buf) |
83 | return NULL; | 84 | return NULL; |
84 | 85 | ||
85 | buf->write = write; | 86 | buf->dma_dir = dma_dir; |
86 | offset = vaddr & ~PAGE_MASK; | 87 | offset = vaddr & ~PAGE_MASK; |
87 | buf->size = size; | 88 | buf->size = size; |
88 | 89 | ||
@@ -107,7 +108,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
107 | /* current->mm->mmap_sem is taken by videobuf2 core */ | 108 | /* current->mm->mmap_sem is taken by videobuf2 core */ |
108 | n_pages = get_user_pages(current, current->mm, | 109 | n_pages = get_user_pages(current, current->mm, |
109 | vaddr & PAGE_MASK, buf->n_pages, | 110 | vaddr & PAGE_MASK, buf->n_pages, |
110 | write, 1, /* force */ | 111 | dma_dir == DMA_FROM_DEVICE, |
112 | 1, /* force */ | ||
111 | buf->pages, NULL); | 113 | buf->pages, NULL); |
112 | if (n_pages != buf->n_pages) | 114 | if (n_pages != buf->n_pages) |
113 | goto fail_get_user_pages; | 115 | goto fail_get_user_pages; |
@@ -144,7 +146,7 @@ static void vb2_vmalloc_put_userptr(void *buf_priv) | |||
144 | if (vaddr) | 146 | if (vaddr) |
145 | vm_unmap_ram((void *)vaddr, buf->n_pages); | 147 | vm_unmap_ram((void *)vaddr, buf->n_pages); |
146 | for (i = 0; i < buf->n_pages; ++i) { | 148 | for (i = 0; i < buf->n_pages; ++i) { |
147 | if (buf->write) | 149 | if (buf->dma_dir == DMA_FROM_DEVICE) |
148 | set_page_dirty_lock(buf->pages[i]); | 150 | set_page_dirty_lock(buf->pages[i]); |
149 | put_page(buf->pages[i]); | 151 | put_page(buf->pages[i]); |
150 | } | 152 | } |
@@ -240,7 +242,7 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv) | |||
240 | } | 242 | } |
241 | 243 | ||
242 | static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, | 244 | static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, |
243 | unsigned long size, int write) | 245 | unsigned long size, enum dma_data_direction dma_dir) |
244 | { | 246 | { |
245 | struct vb2_vmalloc_buf *buf; | 247 | struct vb2_vmalloc_buf *buf; |
246 | 248 | ||
@@ -252,7 +254,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, | |||
252 | return ERR_PTR(-ENOMEM); | 254 | return ERR_PTR(-ENOMEM); |
253 | 255 | ||
254 | buf->dbuf = dbuf; | 256 | buf->dbuf = dbuf; |
255 | buf->write = write; | 257 | buf->dma_dir = dma_dir; |
256 | buf->size = size; | 258 | buf->size = size; |
257 | 259 | ||
258 | return buf; | 260 | return buf; |