diff options
author | Ricardo Ribalda <ricardo.ribalda@gmail.com> | 2013-08-02 09:20:00 -0400 |
---|---|---|
committer | Mauro Carvalho Chehab <m.chehab@samsung.com> | 2013-09-26 06:33:59 -0400 |
commit | 223012475968fb8dac866bff5b278e9311a36894 (patch) | |
tree | ed1ea775ccb2353502dd78f76190160c07b8e9d5 | |
parent | df23728118cd0f53070769e2ac26a255f66daa57 (diff) |
[media] videobuf2-dma-sg: Replace vb2_dma_sg_desc with sg_table
Replace the private struct vb2_dma_sg_desc with the struct sg_table so
we can benefit from all the helper functions in lib/scatterlist.c for
things like allocating the sg or compacting the descriptor.
The marvell-ccic and solo6x10 drivers, which use this API, have been updated.
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Andre Heider <a.heider@gmail.com>
Signed-off-by: Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
[s.nawrocki@samsung.com: minor corrections of the changelog]
Signed-off-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
-rw-r--r-- | drivers/media/platform/marvell-ccic/mcam-core.c | 14 | ||||
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-dma-sg.c | 103 | ||||
-rw-r--r-- | drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c | 20 | ||||
-rw-r--r-- | include/media/videobuf2-dma-sg.h | 10 |
4 files changed, 63 insertions, 84 deletions
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index 5184887b155c..32fab30a9105 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c | |||
@@ -1221,16 +1221,16 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb) | |||
1221 | { | 1221 | { |
1222 | struct mcam_vb_buffer *mvb = vb_to_mvb(vb); | 1222 | struct mcam_vb_buffer *mvb = vb_to_mvb(vb); |
1223 | struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); | 1223 | struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); |
1224 | struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0); | 1224 | struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0); |
1225 | struct mcam_dma_desc *desc = mvb->dma_desc; | 1225 | struct mcam_dma_desc *desc = mvb->dma_desc; |
1226 | struct scatterlist *sg; | 1226 | struct scatterlist *sg; |
1227 | int i; | 1227 | int i; |
1228 | 1228 | ||
1229 | mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages, | 1229 | mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl, |
1230 | DMA_FROM_DEVICE); | 1230 | sg_table->nents, DMA_FROM_DEVICE); |
1231 | if (mvb->dma_desc_nent <= 0) | 1231 | if (mvb->dma_desc_nent <= 0) |
1232 | return -EIO; /* Not sure what's right here */ | 1232 | return -EIO; /* Not sure what's right here */ |
1233 | for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) { | 1233 | for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) { |
1234 | desc->dma_addr = sg_dma_address(sg); | 1234 | desc->dma_addr = sg_dma_address(sg); |
1235 | desc->segment_len = sg_dma_len(sg); | 1235 | desc->segment_len = sg_dma_len(sg); |
1236 | desc++; | 1236 | desc++; |
@@ -1241,9 +1241,11 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb) | |||
1241 | static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb) | 1241 | static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb) |
1242 | { | 1242 | { |
1243 | struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); | 1243 | struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); |
1244 | struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0); | 1244 | struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0); |
1245 | 1245 | ||
1246 | dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE); | 1246 | if (sg_table) |
1247 | dma_unmap_sg(cam->dev, sg_table->sgl, | ||
1248 | sg_table->nents, DMA_FROM_DEVICE); | ||
1247 | return 0; | 1249 | return 0; |
1248 | } | 1250 | } |
1249 | 1251 | ||
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 4999c4886898..2f860543912c 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c | |||
@@ -35,7 +35,9 @@ struct vb2_dma_sg_buf { | |||
35 | struct page **pages; | 35 | struct page **pages; |
36 | int write; | 36 | int write; |
37 | int offset; | 37 | int offset; |
38 | struct vb2_dma_sg_desc sg_desc; | 38 | struct sg_table sg_table; |
39 | size_t size; | ||
40 | unsigned int num_pages; | ||
39 | atomic_t refcount; | 41 | atomic_t refcount; |
40 | struct vb2_vmarea_handler handler; | 42 | struct vb2_vmarea_handler handler; |
41 | }; | 43 | }; |
@@ -46,7 +48,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, | |||
46 | gfp_t gfp_flags) | 48 | gfp_t gfp_flags) |
47 | { | 49 | { |
48 | unsigned int last_page = 0; | 50 | unsigned int last_page = 0; |
49 | int size = buf->sg_desc.size; | 51 | int size = buf->size; |
50 | 52 | ||
51 | while (size > 0) { | 53 | while (size > 0) { |
52 | struct page *pages; | 54 | struct page *pages; |
@@ -74,12 +76,8 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, | |||
74 | } | 76 | } |
75 | 77 | ||
76 | split_page(pages, order); | 78 | split_page(pages, order); |
77 | for (i = 0; i < (1 << order); i++) { | 79 | for (i = 0; i < (1 << order); i++) |
78 | buf->pages[last_page] = &pages[i]; | 80 | buf->pages[last_page++] = &pages[i]; |
79 | sg_set_page(&buf->sg_desc.sglist[last_page], | ||
80 | buf->pages[last_page], PAGE_SIZE, 0); | ||
81 | last_page++; | ||
82 | } | ||
83 | 81 | ||
84 | size -= PAGE_SIZE << order; | 82 | size -= PAGE_SIZE << order; |
85 | } | 83 | } |
@@ -91,6 +89,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla | |||
91 | { | 89 | { |
92 | struct vb2_dma_sg_buf *buf; | 90 | struct vb2_dma_sg_buf *buf; |
93 | int ret; | 91 | int ret; |
92 | int num_pages; | ||
94 | 93 | ||
95 | buf = kzalloc(sizeof *buf, GFP_KERNEL); | 94 | buf = kzalloc(sizeof *buf, GFP_KERNEL); |
96 | if (!buf) | 95 | if (!buf) |
@@ -99,17 +98,11 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla | |||
99 | buf->vaddr = NULL; | 98 | buf->vaddr = NULL; |
100 | buf->write = 0; | 99 | buf->write = 0; |
101 | buf->offset = 0; | 100 | buf->offset = 0; |
102 | buf->sg_desc.size = size; | 101 | buf->size = size; |
103 | /* size is already page aligned */ | 102 | /* size is already page aligned */ |
104 | buf->sg_desc.num_pages = size >> PAGE_SHIFT; | 103 | buf->num_pages = size >> PAGE_SHIFT; |
105 | |||
106 | buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages * | ||
107 | sizeof(*buf->sg_desc.sglist)); | ||
108 | if (!buf->sg_desc.sglist) | ||
109 | goto fail_sglist_alloc; | ||
110 | sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages); | ||
111 | 104 | ||
112 | buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *), | 105 | buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), |
113 | GFP_KERNEL); | 106 | GFP_KERNEL); |
114 | if (!buf->pages) | 107 | if (!buf->pages) |
115 | goto fail_pages_array_alloc; | 108 | goto fail_pages_array_alloc; |
@@ -118,6 +111,11 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla | |||
118 | if (ret) | 111 | if (ret) |
119 | goto fail_pages_alloc; | 112 | goto fail_pages_alloc; |
120 | 113 | ||
114 | ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, | ||
115 | buf->num_pages, 0, size, gfp_flags); | ||
116 | if (ret) | ||
117 | goto fail_table_alloc; | ||
118 | |||
121 | buf->handler.refcount = &buf->refcount; | 119 | buf->handler.refcount = &buf->refcount; |
122 | buf->handler.put = vb2_dma_sg_put; | 120 | buf->handler.put = vb2_dma_sg_put; |
123 | buf->handler.arg = buf; | 121 | buf->handler.arg = buf; |
@@ -125,16 +123,16 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla | |||
125 | atomic_inc(&buf->refcount); | 123 | atomic_inc(&buf->refcount); |
126 | 124 | ||
127 | dprintk(1, "%s: Allocated buffer of %d pages\n", | 125 | dprintk(1, "%s: Allocated buffer of %d pages\n", |
128 | __func__, buf->sg_desc.num_pages); | 126 | __func__, buf->num_pages); |
129 | return buf; | 127 | return buf; |
130 | 128 | ||
129 | fail_table_alloc: | ||
130 | num_pages = buf->num_pages; | ||
131 | while (num_pages--) | ||
132 | __free_page(buf->pages[num_pages]); | ||
131 | fail_pages_alloc: | 133 | fail_pages_alloc: |
132 | kfree(buf->pages); | 134 | kfree(buf->pages); |
133 | |||
134 | fail_pages_array_alloc: | 135 | fail_pages_array_alloc: |
135 | vfree(buf->sg_desc.sglist); | ||
136 | |||
137 | fail_sglist_alloc: | ||
138 | kfree(buf); | 136 | kfree(buf); |
139 | return NULL; | 137 | return NULL; |
140 | } | 138 | } |
@@ -142,14 +140,14 @@ fail_sglist_alloc: | |||
142 | static void vb2_dma_sg_put(void *buf_priv) | 140 | static void vb2_dma_sg_put(void *buf_priv) |
143 | { | 141 | { |
144 | struct vb2_dma_sg_buf *buf = buf_priv; | 142 | struct vb2_dma_sg_buf *buf = buf_priv; |
145 | int i = buf->sg_desc.num_pages; | 143 | int i = buf->num_pages; |
146 | 144 | ||
147 | if (atomic_dec_and_test(&buf->refcount)) { | 145 | if (atomic_dec_and_test(&buf->refcount)) { |
148 | dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, | 146 | dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, |
149 | buf->sg_desc.num_pages); | 147 | buf->num_pages); |
150 | if (buf->vaddr) | 148 | if (buf->vaddr) |
151 | vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages); | 149 | vm_unmap_ram(buf->vaddr, buf->num_pages); |
152 | vfree(buf->sg_desc.sglist); | 150 | sg_free_table(&buf->sg_table); |
153 | while (--i >= 0) | 151 | while (--i >= 0) |
154 | __free_page(buf->pages[i]); | 152 | __free_page(buf->pages[i]); |
155 | kfree(buf->pages); | 153 | kfree(buf->pages); |
@@ -162,7 +160,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
162 | { | 160 | { |
163 | struct vb2_dma_sg_buf *buf; | 161 | struct vb2_dma_sg_buf *buf; |
164 | unsigned long first, last; | 162 | unsigned long first, last; |
165 | int num_pages_from_user, i; | 163 | int num_pages_from_user; |
166 | 164 | ||
167 | buf = kzalloc(sizeof *buf, GFP_KERNEL); | 165 | buf = kzalloc(sizeof *buf, GFP_KERNEL); |
168 | if (!buf) | 166 | if (!buf) |
@@ -171,56 +169,41 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
171 | buf->vaddr = NULL; | 169 | buf->vaddr = NULL; |
172 | buf->write = write; | 170 | buf->write = write; |
173 | buf->offset = vaddr & ~PAGE_MASK; | 171 | buf->offset = vaddr & ~PAGE_MASK; |
174 | buf->sg_desc.size = size; | 172 | buf->size = size; |
175 | 173 | ||
176 | first = (vaddr & PAGE_MASK) >> PAGE_SHIFT; | 174 | first = (vaddr & PAGE_MASK) >> PAGE_SHIFT; |
177 | last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT; | 175 | last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT; |
178 | buf->sg_desc.num_pages = last - first + 1; | 176 | buf->num_pages = last - first + 1; |
179 | |||
180 | buf->sg_desc.sglist = vzalloc( | ||
181 | buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist)); | ||
182 | if (!buf->sg_desc.sglist) | ||
183 | goto userptr_fail_sglist_alloc; | ||
184 | 177 | ||
185 | sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages); | 178 | buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), |
186 | |||
187 | buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *), | ||
188 | GFP_KERNEL); | 179 | GFP_KERNEL); |
189 | if (!buf->pages) | 180 | if (!buf->pages) |
190 | goto userptr_fail_pages_array_alloc; | 181 | return NULL; |
191 | 182 | ||
192 | num_pages_from_user = get_user_pages(current, current->mm, | 183 | num_pages_from_user = get_user_pages(current, current->mm, |
193 | vaddr & PAGE_MASK, | 184 | vaddr & PAGE_MASK, |
194 | buf->sg_desc.num_pages, | 185 | buf->num_pages, |
195 | write, | 186 | write, |
196 | 1, /* force */ | 187 | 1, /* force */ |
197 | buf->pages, | 188 | buf->pages, |
198 | NULL); | 189 | NULL); |
199 | 190 | ||
200 | if (num_pages_from_user != buf->sg_desc.num_pages) | 191 | if (num_pages_from_user != buf->num_pages) |
201 | goto userptr_fail_get_user_pages; | 192 | goto userptr_fail_get_user_pages; |
202 | 193 | ||
203 | sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0], | 194 | if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages, |
204 | PAGE_SIZE - buf->offset, buf->offset); | 195 | buf->num_pages, buf->offset, size, 0)) |
205 | size -= PAGE_SIZE - buf->offset; | 196 | goto userptr_fail_alloc_table_from_pages; |
206 | for (i = 1; i < buf->sg_desc.num_pages; ++i) { | 197 | |
207 | sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i], | ||
208 | min_t(size_t, PAGE_SIZE, size), 0); | ||
209 | size -= min_t(size_t, PAGE_SIZE, size); | ||
210 | } | ||
211 | return buf; | 198 | return buf; |
212 | 199 | ||
200 | userptr_fail_alloc_table_from_pages: | ||
213 | userptr_fail_get_user_pages: | 201 | userptr_fail_get_user_pages: |
214 | dprintk(1, "get_user_pages requested/got: %d/%d]\n", | 202 | dprintk(1, "get_user_pages requested/got: %d/%d]\n", |
215 | num_pages_from_user, buf->sg_desc.num_pages); | 203 | num_pages_from_user, buf->num_pages); |
216 | while (--num_pages_from_user >= 0) | 204 | while (--num_pages_from_user >= 0) |
217 | put_page(buf->pages[num_pages_from_user]); | 205 | put_page(buf->pages[num_pages_from_user]); |
218 | kfree(buf->pages); | 206 | kfree(buf->pages); |
219 | |||
220 | userptr_fail_pages_array_alloc: | ||
221 | vfree(buf->sg_desc.sglist); | ||
222 | |||
223 | userptr_fail_sglist_alloc: | ||
224 | kfree(buf); | 207 | kfree(buf); |
225 | return NULL; | 208 | return NULL; |
226 | } | 209 | } |
@@ -232,18 +215,18 @@ userptr_fail_sglist_alloc: | |||
232 | static void vb2_dma_sg_put_userptr(void *buf_priv) | 215 | static void vb2_dma_sg_put_userptr(void *buf_priv) |
233 | { | 216 | { |
234 | struct vb2_dma_sg_buf *buf = buf_priv; | 217 | struct vb2_dma_sg_buf *buf = buf_priv; |
235 | int i = buf->sg_desc.num_pages; | 218 | int i = buf->num_pages; |
236 | 219 | ||
237 | dprintk(1, "%s: Releasing userspace buffer of %d pages\n", | 220 | dprintk(1, "%s: Releasing userspace buffer of %d pages\n", |
238 | __func__, buf->sg_desc.num_pages); | 221 | __func__, buf->num_pages); |
239 | if (buf->vaddr) | 222 | if (buf->vaddr) |
240 | vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages); | 223 | vm_unmap_ram(buf->vaddr, buf->num_pages); |
224 | sg_free_table(&buf->sg_table); | ||
241 | while (--i >= 0) { | 225 | while (--i >= 0) { |
242 | if (buf->write) | 226 | if (buf->write) |
243 | set_page_dirty_lock(buf->pages[i]); | 227 | set_page_dirty_lock(buf->pages[i]); |
244 | put_page(buf->pages[i]); | 228 | put_page(buf->pages[i]); |
245 | } | 229 | } |
246 | vfree(buf->sg_desc.sglist); | ||
247 | kfree(buf->pages); | 230 | kfree(buf->pages); |
248 | kfree(buf); | 231 | kfree(buf); |
249 | } | 232 | } |
@@ -256,7 +239,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv) | |||
256 | 239 | ||
257 | if (!buf->vaddr) | 240 | if (!buf->vaddr) |
258 | buf->vaddr = vm_map_ram(buf->pages, | 241 | buf->vaddr = vm_map_ram(buf->pages, |
259 | buf->sg_desc.num_pages, | 242 | buf->num_pages, |
260 | -1, | 243 | -1, |
261 | PAGE_KERNEL); | 244 | PAGE_KERNEL); |
262 | 245 | ||
@@ -312,7 +295,7 @@ static void *vb2_dma_sg_cookie(void *buf_priv) | |||
312 | { | 295 | { |
313 | struct vb2_dma_sg_buf *buf = buf_priv; | 296 | struct vb2_dma_sg_buf *buf = buf_priv; |
314 | 297 | ||
315 | return &buf->sg_desc; | 298 | return &buf->sg_table; |
316 | } | 299 | } |
317 | 300 | ||
318 | const struct vb2_mem_ops vb2_dma_sg_memops = { | 301 | const struct vb2_mem_ops vb2_dma_sg_memops = { |
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c index a4c589604b02..9a6d5c0b1339 100644 --- a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c +++ b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c | |||
@@ -346,7 +346,7 @@ static int enc_get_mpeg_dma(struct solo_dev *solo_dev, dma_addr_t dma, | |||
346 | /* Build a descriptor queue out of an SG list and send it to the P2M for | 346 | /* Build a descriptor queue out of an SG list and send it to the P2M for |
347 | * processing. */ | 347 | * processing. */ |
348 | static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip, | 348 | static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip, |
349 | struct vb2_dma_sg_desc *vbuf, int off, int size, | 349 | struct sg_table *vbuf, int off, int size, |
350 | unsigned int base, unsigned int base_size) | 350 | unsigned int base, unsigned int base_size) |
351 | { | 351 | { |
352 | struct solo_dev *solo_dev = solo_enc->solo_dev; | 352 | struct solo_dev *solo_dev = solo_enc->solo_dev; |
@@ -359,7 +359,7 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip, | |||
359 | 359 | ||
360 | solo_enc->desc_count = 1; | 360 | solo_enc->desc_count = 1; |
361 | 361 | ||
362 | for_each_sg(vbuf->sglist, sg, vbuf->num_pages, i) { | 362 | for_each_sg(vbuf->sgl, sg, vbuf->nents, i) { |
363 | struct solo_p2m_desc *desc; | 363 | struct solo_p2m_desc *desc; |
364 | dma_addr_t dma; | 364 | dma_addr_t dma; |
365 | int len; | 365 | int len; |
@@ -434,7 +434,7 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc, | |||
434 | struct vb2_buffer *vb, struct vop_header *vh) | 434 | struct vb2_buffer *vb, struct vop_header *vh) |
435 | { | 435 | { |
436 | struct solo_dev *solo_dev = solo_enc->solo_dev; | 436 | struct solo_dev *solo_dev = solo_enc->solo_dev; |
437 | struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0); | 437 | struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); |
438 | int frame_size; | 438 | int frame_size; |
439 | int ret; | 439 | int ret; |
440 | 440 | ||
@@ -443,7 +443,7 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc, | |||
443 | if (vb2_plane_size(vb, 0) < vh->jpeg_size + solo_enc->jpeg_len) | 443 | if (vb2_plane_size(vb, 0) < vh->jpeg_size + solo_enc->jpeg_len) |
444 | return -EIO; | 444 | return -EIO; |
445 | 445 | ||
446 | sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages, | 446 | sg_copy_from_buffer(vbuf->sgl, vbuf->nents, |
447 | solo_enc->jpeg_header, | 447 | solo_enc->jpeg_header, |
448 | solo_enc->jpeg_len); | 448 | solo_enc->jpeg_len); |
449 | 449 | ||
@@ -451,12 +451,12 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc, | |||
451 | & ~(DMA_ALIGN - 1); | 451 | & ~(DMA_ALIGN - 1); |
452 | vb2_set_plane_payload(vb, 0, vh->jpeg_size + solo_enc->jpeg_len); | 452 | vb2_set_plane_payload(vb, 0, vh->jpeg_size + solo_enc->jpeg_len); |
453 | 453 | ||
454 | dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, | 454 | dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, |
455 | DMA_FROM_DEVICE); | 455 | DMA_FROM_DEVICE); |
456 | ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, vh->jpeg_off, | 456 | ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, vh->jpeg_off, |
457 | frame_size, SOLO_JPEG_EXT_ADDR(solo_dev), | 457 | frame_size, SOLO_JPEG_EXT_ADDR(solo_dev), |
458 | SOLO_JPEG_EXT_SIZE(solo_dev)); | 458 | SOLO_JPEG_EXT_SIZE(solo_dev)); |
459 | dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, | 459 | dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, |
460 | DMA_FROM_DEVICE); | 460 | DMA_FROM_DEVICE); |
461 | return ret; | 461 | return ret; |
462 | } | 462 | } |
@@ -465,7 +465,7 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc, | |||
465 | struct vb2_buffer *vb, struct vop_header *vh) | 465 | struct vb2_buffer *vb, struct vop_header *vh) |
466 | { | 466 | { |
467 | struct solo_dev *solo_dev = solo_enc->solo_dev; | 467 | struct solo_dev *solo_dev = solo_enc->solo_dev; |
468 | struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0); | 468 | struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); |
469 | int frame_off, frame_size; | 469 | int frame_off, frame_size; |
470 | int skip = 0; | 470 | int skip = 0; |
471 | int ret; | 471 | int ret; |
@@ -475,7 +475,7 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc, | |||
475 | 475 | ||
476 | /* If this is a key frame, add extra header */ | 476 | /* If this is a key frame, add extra header */ |
477 | if (!vh->vop_type) { | 477 | if (!vh->vop_type) { |
478 | sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages, | 478 | sg_copy_from_buffer(vbuf->sgl, vbuf->nents, |
479 | solo_enc->vop, | 479 | solo_enc->vop, |
480 | solo_enc->vop_len); | 480 | solo_enc->vop_len); |
481 | 481 | ||
@@ -494,12 +494,12 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc, | |||
494 | frame_size = (vh->mpeg_size + skip + (DMA_ALIGN - 1)) | 494 | frame_size = (vh->mpeg_size + skip + (DMA_ALIGN - 1)) |
495 | & ~(DMA_ALIGN - 1); | 495 | & ~(DMA_ALIGN - 1); |
496 | 496 | ||
497 | dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, | 497 | dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, |
498 | DMA_FROM_DEVICE); | 498 | DMA_FROM_DEVICE); |
499 | ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size, | 499 | ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size, |
500 | SOLO_MP4E_EXT_ADDR(solo_dev), | 500 | SOLO_MP4E_EXT_ADDR(solo_dev), |
501 | SOLO_MP4E_EXT_SIZE(solo_dev)); | 501 | SOLO_MP4E_EXT_SIZE(solo_dev)); |
502 | dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages, | 502 | dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents, |
503 | DMA_FROM_DEVICE); | 503 | DMA_FROM_DEVICE); |
504 | return ret; | 504 | return ret; |
505 | } | 505 | } |
diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h index 0038526b8ef7..7b89852779af 100644 --- a/include/media/videobuf2-dma-sg.h +++ b/include/media/videobuf2-dma-sg.h | |||
@@ -15,16 +15,10 @@ | |||
15 | 15 | ||
16 | #include <media/videobuf2-core.h> | 16 | #include <media/videobuf2-core.h> |
17 | 17 | ||
18 | struct vb2_dma_sg_desc { | 18 | static inline struct sg_table *vb2_dma_sg_plane_desc( |
19 | unsigned long size; | ||
20 | unsigned int num_pages; | ||
21 | struct scatterlist *sglist; | ||
22 | }; | ||
23 | |||
24 | static inline struct vb2_dma_sg_desc *vb2_dma_sg_plane_desc( | ||
25 | struct vb2_buffer *vb, unsigned int plane_no) | 19 | struct vb2_buffer *vb, unsigned int plane_no) |
26 | { | 20 | { |
27 | return (struct vb2_dma_sg_desc *)vb2_plane_cookie(vb, plane_no); | 21 | return (struct sg_table *)vb2_plane_cookie(vb, plane_no); |
28 | } | 22 | } |
29 | 23 | ||
30 | extern const struct vb2_mem_ops vb2_dma_sg_memops; | 24 | extern const struct vb2_mem_ops vb2_dma_sg_memops; |