author		Hans Verkuil <hverkuil@xs4all.nl>	2007-08-19 11:25:39 -0400
committer	Mauro Carvalho Chehab <mchehab@infradead.org>	2007-10-09 21:05:46 -0400
commit		0989fd2c88a11aa5014b2b348ed51872d14d536d (patch)
tree		b01ba5de69e17fcf408ce597af966e4f0949e7c4 /drivers/media/video/ivtv/ivtv-udma.c
parent		3869c6a088c2eb165abe476c3372c6a3653649b3 (diff)
V4L/DVB (6058): ivtv: add support for highmem udma
When DMAing userspace buffers to the cx23415, the driver has to check whether each page is in highmem. If it is, a bounce buffer must be used to allow the DMA.

Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@infradead.org>
Diffstat (limited to 'drivers/media/video/ivtv/ivtv-udma.c')
-rw-r--r--	drivers/media/video/ivtv/ivtv-udma.c	| 45
1 file changed, 37 insertions(+), 8 deletions(-)
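The commit message describes a classic bounce-buffer scheme: a highmem page has no permanent kernel mapping, so its payload is first copied into a page allocated from lowmem, and the scatter-gather entry then points at that copy. Below is a minimal, self-contained sketch of the pattern using the highmem API of this kernel era (kmap_atomic() with an explicit KM_BOUNCE_READ slot); the helper name bounce_for_dma() and its signature are illustrative, not part of the patch:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/irqflags.h>

/*
 * Hypothetical helper, not from the patch: pick the page to place in the
 * SG list.  Lowmem pages are used directly; a highmem page's payload is
 * first copied into a lazily allocated lowmem bounce page.  Returns NULL
 * if the bounce page cannot be allocated (the patch maps that case to
 * -ENOMEM).
 */
static struct page *bounce_for_dma(struct page *pg, struct page **bounce,
                                   unsigned int offset, unsigned int len)
{
        unsigned long flags;
        void *src;

        if (!PageHighMem(pg))
                return pg;      /* directly addressable, no copy needed */
        if (*bounce == NULL)
                *bounce = alloc_page(GFP_KERNEL);
        if (*bounce == NULL)
                return NULL;
        /* the KM_BOUNCE_READ slot is per-CPU and can also be used from
         * interrupt context, so keep IRQs off around the mapping */
        local_irq_save(flags);
        src = kmap_atomic(pg, KM_BOUNCE_READ) + offset;
        memcpy(page_address(*bounce) + offset, src, len);
        kunmap_atomic(src, KM_BOUNCE_READ);
        local_irq_restore(flags);
        return *bounce;
}

One design choice visible in the diff: the bounce pages are cached in the driver's bouncemap[] array rather than freed after each transfer, so repeated OSD DMA transfers reuse them; they are only released in ivtv_udma_free().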
diff --git a/drivers/media/video/ivtv/ivtv-udma.c b/drivers/media/video/ivtv/ivtv-udma.c
index bd642e1aafc3..5592abbe14e6 100644
--- a/drivers/media/video/ivtv/ivtv-udma.c
+++ b/drivers/media/video/ivtv/ivtv-udma.c
@@ -38,23 +38,38 @@ void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long
 int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
 {
         int i, offset;
+        unsigned long flags;
 
         offset = dma_page->offset;
 
         /* Fill SG Array with new values */
         for (i = 0; i < dma_page->page_count; i++) {
-                if (i == dma_page->page_count - 1) {
-                        dma->SGlist[map_offset].length = dma_page->tail;
+                unsigned int len = (i == dma_page->page_count - 1) ?
+                        dma_page->tail : PAGE_SIZE - offset;
+
+                dma->SGlist[map_offset].length = len;
+                dma->SGlist[map_offset].offset = offset;
+                if (PageHighMem(dma->map[map_offset])) {
+                        void *src;
+
+                        if (dma->bouncemap[map_offset] == NULL)
+                                dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
+                        if (dma->bouncemap[map_offset] == NULL)
+                                return -ENOMEM;
+                        local_irq_save(flags);
+                        src = kmap_atomic(dma->map[map_offset], KM_BOUNCE_READ) + offset;
+                        memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
+                        kunmap_atomic(src, KM_BOUNCE_READ);
+                        local_irq_restore(flags);
+                        dma->SGlist[map_offset].page = dma->bouncemap[map_offset];
                 }
                 else {
-                        dma->SGlist[map_offset].length = PAGE_SIZE - offset;
+                        dma->SGlist[map_offset].page = dma->map[map_offset];
                 }
-                dma->SGlist[map_offset].offset = offset;
-                dma->SGlist[map_offset].page = dma->map[map_offset];
                 offset = 0;
                 map_offset++;
         }
-        return map_offset;
+        return 0;
 }
 
 void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split) {
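Note the contract change in the hunk above: ivtv_udma_fill_sg_list() no longer returns the running map_offset but 0 on success, or -ENOMEM when a bounce page cannot be allocated. The ivtv_udma_setup() hunks below adapt the caller accordingly and unpin the already-locked user pages on failure.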
@@ -89,7 +104,7 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
 {
         struct ivtv_dma_page_info user_dma;
         struct ivtv_user_dma *dma = &itv->udma;
-        int err;
+        int i, err;
 
         IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);
 
@@ -123,7 +138,14 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
         dma->page_count = user_dma.page_count;
 
         /* Fill SG List with new values */
-        ivtv_udma_fill_sg_list(dma, &user_dma, 0);
+        err = ivtv_udma_fill_sg_list(dma, &user_dma, 0);
+        if (err) {
+                for (i = 0; i < dma->page_count; i++) {
+                        put_page(dma->map[i]);
+                }
+                dma->page_count = 0;
+                return err;
+        }
 
         /* Map SG List */
         dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
@@ -166,6 +188,8 @@ void ivtv_udma_unmap(struct ivtv *itv)
 
 void ivtv_udma_free(struct ivtv *itv)
 {
+        int i;
+
         /* Unmap SG Array */
         if (itv->udma.SG_handle) {
                 pci_unmap_single(itv->dev, itv->udma.SG_handle,
@@ -176,6 +200,11 @@ void ivtv_udma_free(struct ivtv *itv)
         if (itv->udma.SG_length) {
                 pci_unmap_sg(itv->dev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
         }
+
+        for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
+                if (itv->udma.bouncemap[i])
+                        __free_page(itv->udma.bouncemap[i]);
+        }
 }
 
 void ivtv_udma_start(struct ivtv *itv)