Diffstat (limited to 'drivers')
-rw-r--r--   drivers/char/drm/via_dma.c      |   4
-rw-r--r--   drivers/char/drm/via_dmablit.c  | 805
-rw-r--r--   drivers/char/drm/via_dmablit.h  | 138
-rw-r--r--   drivers/char/drm/via_drm.h      |   2
-rw-r--r--   drivers/char/drm/via_drv.h      |   4
-rw-r--r--   drivers/char/drm/via_mm.c       |   4
6 files changed, 951 insertions, 6 deletions
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index fa1cb5062cf7..593c0b8f650a 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -736,7 +736,9 @@ drm_ioctl_desc_t via_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH},
 	[DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH},
 	[DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH},
-	[DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH}
+	[DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH}
 };
 
 int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
new file mode 100644
index 000000000000..9d5e027dae0e
--- /dev/null
+++ b/drivers/char/drm/via_dmablit.c
@@ -0,0 +1,805 @@
1 | /* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro | ||
2 | * | ||
3 | * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the | ||
13 | * next paragraph) shall be included in all copies or substantial portions | ||
14 | * of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
20 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
21 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
22 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: | ||
25 | * Thomas Hellstrom. | ||
26 | * Partially based on code obtained from Digeo Inc. | ||
27 | */ | ||
28 | |||
29 | |||
30 | /* | ||
31 | * Unmaps the DMA mappings. | ||
32 | * FIXME: Is this a NoOp on x86? Also | ||
33 | * FIXME: What happens if this one is called and a pending blit has previously done | ||
34 | * the same DMA mappings? | ||
35 | */ | ||
36 | |||
37 | #include "drmP.h" | ||
38 | #include "via_drm.h" | ||
39 | #include "via_drv.h" | ||
40 | #include "via_dmablit.h" | ||
41 | |||
42 | #include <linux/pagemap.h> | ||
43 | |||
44 | #define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK) | ||
45 | #define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK) | ||
46 | #define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT) | ||
47 | |||
48 | typedef struct _drm_via_descriptor { | ||
49 | uint32_t mem_addr; | ||
50 | uint32_t dev_addr; | ||
51 | uint32_t size; | ||
52 | uint32_t next; | ||
53 | } drm_via_descriptor_t; | ||
54 | |||
55 | |||
56 | /* | ||
57 | * Unmap a DMA mapping. | ||
58 | */ | ||
59 | |||
60 | |||
61 | |||
62 | static void | ||
63 | via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg) | ||
64 | { | ||
65 | int num_desc = vsg->num_desc; | ||
66 | unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page; | ||
67 | unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page; | ||
68 | drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] + | ||
69 | descriptor_this_page; | ||
70 | dma_addr_t next = vsg->chain_start; | ||
71 | |||
72 | while(num_desc--) { | ||
73 | if (descriptor_this_page-- == 0) { | ||
74 | cur_descriptor_page--; | ||
75 | descriptor_this_page = vsg->descriptors_per_page - 1; | ||
76 | desc_ptr = vsg->desc_pages[cur_descriptor_page] + | ||
77 | descriptor_this_page; | ||
78 | } | ||
79 | dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE); | ||
80 | dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction); | ||
81 | next = (dma_addr_t) desc_ptr->next; | ||
82 | desc_ptr--; | ||
83 | } | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * If mode = 0, count how many descriptors are needed. | ||
88 | * If mode = 1, map the DMA pages for the device, and also build and map the descriptors. | ||
89 | * The hardware runs the descriptors in reverse order, because we are not allowed to update the | ||
90 | * 'next' field without syncing calls once the descriptor is already mapped. | ||
91 | */ | ||
92 | |||
93 | static void | ||
94 | via_map_blit_for_device(struct pci_dev *pdev, | ||
95 | const drm_via_dmablit_t *xfer, | ||
96 | drm_via_sg_info_t *vsg, | ||
97 | int mode) | ||
98 | { | ||
99 | unsigned cur_descriptor_page = 0; | ||
100 | unsigned num_descriptors_this_page = 0; | ||
101 | unsigned char *mem_addr = xfer->mem_addr; | ||
102 | unsigned char *cur_mem; | ||
103 | unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr); | ||
104 | uint32_t fb_addr = xfer->fb_addr; | ||
105 | uint32_t cur_fb; | ||
106 | unsigned long line_len; | ||
107 | unsigned remaining_len; | ||
108 | int num_desc = 0; | ||
109 | int cur_line; | ||
110 | dma_addr_t next = 0 | VIA_DMA_DPR_EC; | ||
111 | drm_via_descriptor_t *desc_ptr = 0; | ||
112 | |||
113 | if (mode == 1) | ||
114 | desc_ptr = vsg->desc_pages[cur_descriptor_page]; | ||
115 | |||
116 | for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) { | ||
117 | |||
118 | line_len = xfer->line_length; | ||
119 | cur_fb = fb_addr; | ||
120 | cur_mem = mem_addr; | ||
121 | |||
122 | while (line_len > 0) { | ||
123 | |||
124 | remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len); | ||
125 | line_len -= remaining_len; | ||
126 | |||
127 | if (mode == 1) { | ||
128 | desc_ptr->mem_addr = | ||
129 | dma_map_page(&pdev->dev, | ||
130 | vsg->pages[VIA_PFN(cur_mem) - | ||
131 | VIA_PFN(first_addr)], | ||
132 | VIA_PGOFF(cur_mem), remaining_len, | ||
133 | vsg->direction); | ||
134 | desc_ptr->dev_addr = cur_fb; | ||
135 | |||
136 | desc_ptr->size = remaining_len; | ||
137 | desc_ptr->next = (uint32_t) next; | ||
138 | next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr), | ||
139 | DMA_TO_DEVICE); | ||
140 | desc_ptr++; | ||
141 | if (++num_descriptors_this_page >= vsg->descriptors_per_page) { | ||
142 | num_descriptors_this_page = 0; | ||
143 | desc_ptr = vsg->desc_pages[++cur_descriptor_page]; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | num_desc++; | ||
148 | cur_mem += remaining_len; | ||
149 | cur_fb += remaining_len; | ||
150 | } | ||
151 | |||
152 | mem_addr += xfer->mem_stride; | ||
153 | fb_addr += xfer->fb_stride; | ||
154 | } | ||
155 | |||
156 | if (mode == 1) { | ||
157 | vsg->chain_start = next; | ||
158 | vsg->state = dr_via_device_mapped; | ||
159 | } | ||
160 | vsg->num_desc = num_desc; | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * Function that frees up all resources for a blit. It is usable even if the | ||
165 | * blit info has only been partially built, as long as the status enum is consistent | ||
166 | * with the actual status of the used resources. | ||
167 | */ | ||
168 | |||
169 | |||
170 | void | ||
171 | via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) | ||
172 | { | ||
173 | struct page *page; | ||
174 | int i; | ||
175 | |||
176 | switch(vsg->state) { | ||
177 | case dr_via_device_mapped: | ||
178 | via_unmap_blit_from_device(pdev, vsg); | ||
179 | case dr_via_desc_pages_alloc: | ||
180 | for (i=0; i<vsg->num_desc_pages; ++i) { | ||
181 | if (vsg->desc_pages[i] != NULL) | ||
182 | free_page((unsigned long)vsg->desc_pages[i]); | ||
183 | } | ||
184 | kfree(vsg->desc_pages); | ||
185 | case dr_via_pages_locked: | ||
186 | for (i=0; i<vsg->num_pages; ++i) { | ||
187 | if ( NULL != (page = vsg->pages[i])) { | ||
188 | if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) | ||
189 | SetPageDirty(page); | ||
190 | page_cache_release(page); | ||
191 | } | ||
192 | } | ||
193 | case dr_via_pages_alloc: | ||
194 | vfree(vsg->pages); | ||
195 | default: | ||
196 | vsg->state = dr_via_sg_init; | ||
197 | } | ||
198 | if (vsg->bounce_buffer) { | ||
199 | vfree(vsg->bounce_buffer); | ||
200 | vsg->bounce_buffer = NULL; | ||
201 | } | ||
202 | vsg->free_on_sequence = 0; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * Fire a blit engine. | ||
207 | */ | ||
208 | |||
209 | static void | ||
210 | via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine) | ||
211 | { | ||
212 | drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; | ||
213 | |||
214 | VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0); | ||
215 | VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0); | ||
216 | VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD | | ||
217 | VIA_DMA_CSR_DE); | ||
218 | VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE); | ||
219 | VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0); | ||
220 | VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start); | ||
221 | VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS); | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will | ||
226 | * occur here if the calling user does not have access to the submitted address. | ||
227 | */ | ||
228 | |||
229 | static int | ||
230 | via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) | ||
231 | { | ||
232 | int ret; | ||
233 | unsigned long first_pfn = VIA_PFN(xfer->mem_addr); | ||
234 | vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) - | ||
235 | first_pfn + 1; | ||
236 | |||
237 | if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) | ||
238 | return DRM_ERR(ENOMEM); | ||
239 | memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); | ||
240 | down_read(¤t->mm->mmap_sem); | ||
241 | ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr, | ||
242 | vsg->num_pages, vsg->direction, 0, vsg->pages, NULL); | ||
243 | |||
244 | up_read(¤t->mm->mmap_sem); | ||
245 | if (ret != vsg->num_pages) { | ||
246 | if (ret < 0) | ||
247 | return ret; | ||
248 | vsg->state = dr_via_pages_locked; | ||
249 | return DRM_ERR(EINVAL); | ||
250 | } | ||
251 | vsg->state = dr_via_pages_locked; | ||
252 | DRM_DEBUG("DMA pages locked\n"); | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the | ||
258 | * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be | ||
259 | * quite large for some blits, and the pages don't need to be contiguous. | ||
260 | */ | ||
261 | |||
262 | static int | ||
263 | via_alloc_desc_pages(drm_via_sg_info_t *vsg) | ||
264 | { | ||
265 | int i; | ||
266 | |||
267 | vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t); | ||
268 | vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / | ||
269 | vsg->descriptors_per_page; | ||
270 | |||
271 | if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL))) | ||
272 | return DRM_ERR(ENOMEM); | ||
273 | |||
274 | memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages); | ||
275 | vsg->state = dr_via_desc_pages_alloc; | ||
276 | for (i=0; i<vsg->num_desc_pages; ++i) { | ||
277 | if (NULL == (vsg->desc_pages[i] = | ||
278 | (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) | ||
279 | return DRM_ERR(ENOMEM); | ||
280 | } | ||
281 | DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, | ||
282 | vsg->num_desc); | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static void | ||
287 | via_abort_dmablit(drm_device_t *dev, int engine) | ||
288 | { | ||
289 | drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; | ||
290 | |||
291 | VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA); | ||
292 | } | ||
293 | |||
294 | static void | ||
295 | via_dmablit_engine_off(drm_device_t *dev, int engine) | ||
296 | { | ||
297 | drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; | ||
298 | |||
299 | VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD); | ||
300 | } | ||
301 | |||
302 | |||
303 | |||
304 | /* | ||
305 | * The dmablit part of the IRQ handler. Only reasonably fast things are done here. | ||
306 | * The rest, such as unmapping and freeing memory for completed blits, is done in a separate workqueue | ||
307 | * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while | ||
308 | * the workqueue task takes care of processing associated with the old blit. | ||
309 | */ | ||
310 | |||
311 | void | ||
312 | via_dmablit_handler(drm_device_t *dev, int engine, int from_irq) | ||
313 | { | ||
314 | drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; | ||
315 | drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; | ||
316 | int cur; | ||
317 | int done_transfer; | ||
318 | unsigned long irqsave=0; | ||
319 | uint32_t status = 0; | ||
320 | |||
321 | DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n", | ||
322 | engine, from_irq, (unsigned long) blitq); | ||
323 | |||
324 | if (from_irq) { | ||
325 | spin_lock(&blitq->blit_lock); | ||
326 | } else { | ||
327 | spin_lock_irqsave(&blitq->blit_lock, irqsave); | ||
328 | } | ||
329 | |||
330 | done_transfer = blitq->is_active && | ||
331 | (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD); | ||
332 | done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE)); | ||
333 | |||
334 | cur = blitq->cur; | ||
335 | if (done_transfer) { | ||
336 | |||
337 | blitq->blits[cur]->aborted = blitq->aborting; | ||
338 | blitq->done_blit_handle++; | ||
339 | DRM_WAKEUP(blitq->blit_queue + cur); | ||
340 | |||
341 | cur++; | ||
342 | if (cur >= VIA_NUM_BLIT_SLOTS) | ||
343 | cur = 0; | ||
344 | blitq->cur = cur; | ||
345 | |||
346 | /* | ||
347 | * Clear transfer done flag. | ||
348 | */ | ||
349 | |||
350 | VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD); | ||
351 | |||
352 | blitq->is_active = 0; | ||
353 | blitq->aborting = 0; | ||
354 | schedule_work(&blitq->wq); | ||
355 | |||
356 | } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) { | ||
357 | |||
358 | /* | ||
359 | * Abort transfer after one second. | ||
360 | */ | ||
361 | |||
362 | via_abort_dmablit(dev, engine); | ||
363 | blitq->aborting = 1; | ||
364 | blitq->end = jiffies + DRM_HZ; | ||
365 | } | ||
366 | |||
367 | if (!blitq->is_active) { | ||
368 | if (blitq->num_outstanding) { | ||
369 | via_fire_dmablit(dev, blitq->blits[cur], engine); | ||
370 | blitq->is_active = 1; | ||
371 | blitq->cur = cur; | ||
372 | blitq->num_outstanding--; | ||
373 | blitq->end = jiffies + DRM_HZ; | ||
374 | if (!timer_pending(&blitq->poll_timer)) { | ||
375 | blitq->poll_timer.expires = jiffies+1; | ||
376 | add_timer(&blitq->poll_timer); | ||
377 | } | ||
378 | } else { | ||
379 | if (timer_pending(&blitq->poll_timer)) { | ||
380 | del_timer(&blitq->poll_timer); | ||
381 | } | ||
382 | via_dmablit_engine_off(dev, engine); | ||
383 | } | ||
384 | } | ||
385 | |||
386 | if (from_irq) { | ||
387 | spin_unlock(&blitq->blit_lock); | ||
388 | } else { | ||
389 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | ||
390 | } | ||
391 | } | ||
392 | |||
393 | |||
394 | |||
395 | /* | ||
396 | * Check whether this blit is still active, performing necessary locking. | ||
397 | */ | ||
398 | |||
399 | static int | ||
400 | via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue) | ||
401 | { | ||
402 | unsigned long irqsave; | ||
403 | uint32_t slot; | ||
404 | int active; | ||
405 | |||
406 | spin_lock_irqsave(&blitq->blit_lock, irqsave); | ||
407 | |||
408 | /* | ||
409 | * Allow for handle wraparounds. | ||
410 | */ | ||
411 | |||
412 | active = ((blitq->done_blit_handle - handle) > (1 << 23)) && | ||
413 | ((blitq->cur_blit_handle - handle) <= (1 << 23)); | ||
414 | |||
415 | if (queue && active) { | ||
416 | slot = handle - blitq->done_blit_handle + blitq->cur -1; | ||
417 | if (slot >= VIA_NUM_BLIT_SLOTS) { | ||
418 | slot -= VIA_NUM_BLIT_SLOTS; | ||
419 | } | ||
420 | *queue = blitq->blit_queue + slot; | ||
421 | } | ||
422 | |||
423 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | ||
424 | |||
425 | return active; | ||
426 | } | ||
427 | |||
428 | /* | ||
429 | * Sync. Wait for at most three seconds for the blit to be performed. | ||
430 | */ | ||
431 | |||
432 | static int | ||
433 | via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine) | ||
434 | { | ||
435 | |||
436 | drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; | ||
437 | drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; | ||
438 | wait_queue_head_t *queue; | ||
439 | int ret = 0; | ||
440 | |||
441 | if (via_dmablit_active(blitq, engine, handle, &queue)) { | ||
442 | DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ, | ||
443 | !via_dmablit_active(blitq, engine, handle, NULL)); | ||
444 | } | ||
445 | DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n", | ||
446 | handle, engine, ret); | ||
447 | |||
448 | return ret; | ||
449 | } | ||
450 | |||
451 | |||
452 | /* | ||
453 | * A timer that regularly polls the blit engine in cases where we don't have interrupts: | ||
454 | * a) Broken hardware (typically those that don't have any video capture facility). | ||
455 | * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted. | ||
456 | * The timer and hardware IRQs can and do work in parallel. If the hardware has | ||
457 | * working IRQs, they will shorten the latency somewhat. | ||
458 | */ | ||
459 | |||
460 | |||
461 | |||
462 | static void | ||
463 | via_dmablit_timer(unsigned long data) | ||
464 | { | ||
465 | drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; | ||
466 | drm_device_t *dev = blitq->dev; | ||
467 | int engine = (int) | ||
468 | (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues); | ||
469 | |||
470 | DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine, | ||
471 | (unsigned long) jiffies); | ||
472 | |||
473 | via_dmablit_handler(dev, engine, 0); | ||
474 | |||
475 | if (!timer_pending(&blitq->poll_timer)) { | ||
476 | blitq->poll_timer.expires = jiffies+1; | ||
477 | add_timer(&blitq->poll_timer); | ||
478 | } | ||
479 | via_dmablit_handler(dev, engine, 0); | ||
480 | |||
481 | } | ||
482 | |||
483 | |||
484 | |||
485 | |||
486 | /* | ||
487 | * Workqueue task that frees data and mappings associated with a blit. | ||
488 | * Also wakes up waiting processes. Each of these tasks handles one | ||
489 | * blit engine only and may not be called on each interrupt. | ||
490 | * blit engine only, and might not be called for every interrupt. | ||
491 | |||
492 | |||
493 | static void | ||
494 | via_dmablit_workqueue(void *data) | ||
495 | { | ||
496 | drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; | ||
497 | drm_device_t *dev = blitq->dev; | ||
498 | unsigned long irqsave; | ||
499 | drm_via_sg_info_t *cur_sg; | ||
500 | int cur_released; | ||
501 | |||
502 | |||
503 | DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long) | ||
504 | (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues)); | ||
505 | |||
506 | spin_lock_irqsave(&blitq->blit_lock, irqsave); | ||
507 | |||
508 | while(blitq->serviced != blitq->cur) { | ||
509 | |||
510 | cur_released = blitq->serviced++; | ||
511 | |||
512 | DRM_DEBUG("Releasing blit slot %d\n", cur_released); | ||
513 | |||
514 | if (blitq->serviced >= VIA_NUM_BLIT_SLOTS) | ||
515 | blitq->serviced = 0; | ||
516 | |||
517 | cur_sg = blitq->blits[cur_released]; | ||
518 | blitq->num_free++; | ||
519 | |||
520 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | ||
521 | |||
522 | DRM_WAKEUP(&blitq->busy_queue); | ||
523 | |||
524 | via_free_sg_info(dev->pdev, cur_sg); | ||
525 | kfree(cur_sg); | ||
526 | |||
527 | spin_lock_irqsave(&blitq->blit_lock, irqsave); | ||
528 | } | ||
529 | |||
530 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | ||
531 | } | ||
532 | |||
533 | |||
534 | /* | ||
535 | * Init all blit engines. Currently we use two, but some hardware has four. | ||
536 | */ | ||
537 | |||
538 | |||
539 | void | ||
540 | via_init_dmablit(drm_device_t *dev) | ||
541 | { | ||
542 | int i,j; | ||
543 | drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; | ||
544 | drm_via_blitq_t *blitq; | ||
545 | |||
546 | pci_set_master(dev->pdev); | ||
547 | |||
548 | for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) { | ||
549 | blitq = dev_priv->blit_queues + i; | ||
550 | blitq->dev = dev; | ||
551 | blitq->cur_blit_handle = 0; | ||
552 | blitq->done_blit_handle = 0; | ||
553 | blitq->head = 0; | ||
554 | blitq->cur = 0; | ||
555 | blitq->serviced = 0; | ||
556 | blitq->num_free = VIA_NUM_BLIT_SLOTS; | ||
557 | blitq->num_outstanding = 0; | ||
558 | blitq->is_active = 0; | ||
559 | blitq->aborting = 0; | ||
560 | blitq->blit_lock = SPIN_LOCK_UNLOCKED; | ||
561 | for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) { | ||
562 | DRM_INIT_WAITQUEUE(blitq->blit_queue + j); | ||
563 | } | ||
564 | DRM_INIT_WAITQUEUE(&blitq->busy_queue); | ||
565 | INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq); | ||
566 | init_timer(&blitq->poll_timer); | ||
567 | blitq->poll_timer.function = &via_dmablit_timer; | ||
568 | blitq->poll_timer.data = (unsigned long) blitq; | ||
569 | } | ||
570 | } | ||
571 | |||
572 | /* | ||
573 | * Build all info and do all mappings required for a blit. | ||
574 | */ | ||
575 | |||
576 | |||
577 | static int | ||
578 | via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) | ||
579 | { | ||
580 | int draw = xfer->to_fb; | ||
581 | int ret = 0; | ||
582 | |||
583 | vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; | ||
584 | vsg->bounce_buffer = 0; | ||
585 | |||
586 | vsg->state = dr_via_sg_init; | ||
587 | |||
588 | if (xfer->num_lines <= 0 || xfer->line_length <= 0) { | ||
589 | DRM_ERROR("Zero size bitblt.\n"); | ||
590 | return DRM_ERR(EINVAL); | ||
591 | } | ||
592 | |||
593 | /* | ||
594 | * The check below is a driver limitation, not a hardware one. We | ||
595 | * don't want to lock unused pages, and don't want to incorporate the | ||
596 | * extra logic of avoiding them. Make sure there are none. | ||
597 | * (Not a big limitation anyway.) | ||
598 | */ | ||
599 | |||
600 | if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) || | ||
601 | (xfer->mem_stride > 2048*4)) { | ||
602 | DRM_ERROR("Too large system memory stride. Stride: %d, " | ||
603 | "Length: %d\n", xfer->mem_stride, xfer->line_length); | ||
604 | return DRM_ERR(EINVAL); | ||
605 | } | ||
606 | |||
607 | if (xfer->num_lines > 2048) { | ||
608 | DRM_ERROR("Too many PCI DMA bitblt lines.\n"); | ||
609 | return DRM_ERR(EINVAL); | ||
610 | } | ||
611 | |||
612 | /* | ||
613 | * We allow a negative fb stride to allow flipping of images in | ||
614 | * transfer. | ||
615 | */ | ||
616 | |||
617 | if (xfer->mem_stride < xfer->line_length || | ||
618 | abs(xfer->fb_stride) < xfer->line_length) { | ||
619 | DRM_ERROR("Invalid frame-buffer / memory stride.\n"); | ||
620 | return DRM_ERR(EINVAL); | ||
621 | } | ||
622 | |||
623 | /* | ||
624 | * A hardware bug seems to be worked around if system memory addresses start on | ||
625 | * 16-byte boundaries. This seems a bit restrictive, however. VIA has been contacted | ||
626 | * about this. Meanwhile, impose the following restrictions: | ||
627 | */ | ||
628 | |||
629 | #ifdef VIA_BUGFREE | ||
630 | if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || | ||
631 | ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) { | ||
632 | DRM_ERROR("Invalid DRM bitblt alignment.\n"); | ||
633 | return DRM_ERR(EINVAL); | ||
634 | } | ||
635 | #else | ||
636 | if ((((unsigned long)xfer->mem_addr & 15) || | ||
637 | ((unsigned long)xfer->fb_addr & 3)) || (xfer->mem_stride & 15) || | ||
638 | (xfer->fb_stride & 3)) { | ||
639 | DRM_ERROR("Invalid DRM bitblt alignment.\n"); | ||
640 | return DRM_ERR(EINVAL); | ||
641 | } | ||
642 | #endif | ||
643 | |||
644 | if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) { | ||
645 | DRM_ERROR("Could not lock DMA pages.\n"); | ||
646 | via_free_sg_info(dev->pdev, vsg); | ||
647 | return ret; | ||
648 | } | ||
649 | |||
650 | via_map_blit_for_device(dev->pdev, xfer, vsg, 0); | ||
651 | if (0 != (ret = via_alloc_desc_pages(vsg))) { | ||
652 | DRM_ERROR("Could not allocate DMA descriptor pages.\n"); | ||
653 | via_free_sg_info(dev->pdev, vsg); | ||
654 | return ret; | ||
655 | } | ||
656 | via_map_blit_for_device(dev->pdev, xfer, vsg, 1); | ||
657 | |||
658 | return 0; | ||
659 | } | ||
660 | |||
661 | |||
662 | /* | ||
663 | * Reserve one free slot in the blit queue. Will wait for one second for one | ||
664 | * to become available. Otherwise -EBUSY is returned. | ||
665 | */ | ||
666 | |||
667 | static int | ||
668 | via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) | ||
669 | { | ||
670 | int ret=0; | ||
671 | unsigned long irqsave; | ||
672 | |||
673 | DRM_DEBUG("Num free is %d\n", blitq->num_free); | ||
674 | spin_lock_irqsave(&blitq->blit_lock, irqsave); | ||
675 | while(blitq->num_free == 0) { | ||
676 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | ||
677 | |||
678 | DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); | ||
679 | if (ret) { | ||
680 | return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret; | ||
681 | } | ||
682 | |||
683 | spin_lock_irqsave(&blitq->blit_lock, irqsave); | ||
684 | } | ||
685 | |||
686 | blitq->num_free--; | ||
687 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | ||
688 | |||
689 | return 0; | ||
690 | } | ||
691 | |||
692 | /* | ||
693 | * Hand back a free slot if we changed our mind. | ||
694 | */ | ||
695 | |||
696 | static void | ||
697 | via_dmablit_release_slot(drm_via_blitq_t *blitq) | ||
698 | { | ||
699 | unsigned long irqsave; | ||
700 | |||
701 | spin_lock_irqsave(&blitq->blit_lock, irqsave); | ||
702 | blitq->num_free++; | ||
703 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | ||
704 | DRM_WAKEUP( &blitq->busy_queue ); | ||
705 | } | ||
706 | |||
707 | /* | ||
708 | * Grab a free slot. Build blit info and queue a blit. | ||
709 | */ | ||
710 | |||
711 | |||
712 | static int | ||
713 | via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer) | ||
714 | { | ||
715 | drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; | ||
716 | drm_via_sg_info_t *vsg; | ||
717 | drm_via_blitq_t *blitq; | ||
718 | int ret; | ||
719 | int engine; | ||
720 | unsigned long irqsave; | ||
721 | |||
722 | if (dev_priv == NULL) { | ||
723 | DRM_ERROR("Called without initialization.\n"); | ||
724 | return DRM_ERR(EINVAL); | ||
725 | } | ||
726 | |||
727 | engine = (xfer->to_fb) ? 0 : 1; | ||
728 | blitq = dev_priv->blit_queues + engine; | ||
729 | if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) { | ||
730 | return ret; | ||
731 | } | ||
732 | if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { | ||
733 | via_dmablit_release_slot(blitq); | ||
734 | return DRM_ERR(ENOMEM); | ||
735 | } | ||
736 | if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) { | ||
737 | via_dmablit_release_slot(blitq); | ||
738 | kfree(vsg); | ||
739 | return ret; | ||
740 | } | ||
741 | spin_lock_irqsave(&blitq->blit_lock, irqsave); | ||
742 | |||
743 | blitq->blits[blitq->head++] = vsg; | ||
744 | if (blitq->head >= VIA_NUM_BLIT_SLOTS) | ||
745 | blitq->head = 0; | ||
746 | blitq->num_outstanding++; | ||
747 | xfer->sync.sync_handle = ++blitq->cur_blit_handle; | ||
748 | |||
749 | spin_unlock_irqrestore(&blitq->blit_lock, irqsave); | ||
750 | xfer->sync.engine = engine; | ||
751 | |||
752 | via_dmablit_handler(dev, engine, 0); | ||
753 | |||
754 | return 0; | ||
755 | } | ||
756 | |||
757 | /* | ||
758 | * Sync on a previously submitted blit. Note that the X server uses signals extensively, and | ||
759 | * that there is a very high probability that this IOCTL will be interrupted by a signal. In that | ||
760 | * case it returns with -EAGAIN so that the signal can be delivered. | ||
761 | * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock(). | ||
762 | */ | ||
763 | |||
764 | int | ||
765 | via_dma_blit_sync( DRM_IOCTL_ARGS ) | ||
766 | { | ||
767 | drm_via_blitsync_t sync; | ||
768 | int err; | ||
769 | DRM_DEVICE; | ||
770 | |||
771 | DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); | ||
772 | |||
773 | if (sync.engine >= VIA_NUM_BLIT_ENGINES) | ||
774 | return DRM_ERR(EINVAL); | ||
775 | |||
776 | err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); | ||
777 | |||
778 | if (DRM_ERR(EINTR) == err) | ||
779 | err = DRM_ERR(EAGAIN); | ||
780 | |||
781 | return err; | ||
782 | } | ||
783 | |||
784 | |||
785 | /* | ||
786 | * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal | ||
787 | * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should | ||
788 | * be reissued. See the above IOCTL code. | ||
789 | */ | ||
790 | |||
791 | int | ||
792 | via_dma_blit( DRM_IOCTL_ARGS ) | ||
793 | { | ||
794 | drm_via_dmablit_t xfer; | ||
795 | int err; | ||
796 | DRM_DEVICE; | ||
797 | |||
798 | DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer)); | ||
799 | |||
800 | err = via_dmablit(dev, &xfer); | ||
801 | |||
802 | DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer)); | ||
803 | |||
804 | return err; | ||
805 | } | ||
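Not part of the patch: a rough sketch of how a user-space client might exercise the two new ioctls, following the -EAGAIN retry rule spelled out in the comments above. The type and command names (drm_via_dmablit_t, drm_via_blitsync_t, DRM_VIA_DMA_BLIT, DRM_VIA_BLIT_SYNC) come from via_drm.h, whose full declarations are not shown in this diff; the libdrm calls drmCommandWriteRead()/drmCommandWrite(), the helper itself, and the comments about limits are assumptions drawn from the checks in via_build_sg_info() and via_dmablit().

/* Hypothetical user-space sketch -- not part of this commit. */
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <xf86drm.h>
#include "via_drm.h"

static int via_blit_to_fb(int fd, void *sys_mem, uint32_t fb_offset,
                          unsigned int line_length, unsigned int num_lines,
                          unsigned int mem_stride, int fb_stride)
{
	drm_via_dmablit_t xfer;
	drm_via_blitsync_t sync;
	int ret;

	memset(&xfer, 0, sizeof(xfer));
	xfer.mem_addr = sys_mem;      /* must be 16-byte aligned (see via_build_sg_info) */
	xfer.fb_addr = fb_offset;     /* must be 4-byte aligned */
	xfer.line_length = line_length;
	xfer.num_lines = num_lines;   /* driver refuses more than 2048 lines */
	xfer.mem_stride = mem_stride; /* mem_stride - line_length must stay below PAGE_SIZE */
	xfer.fb_stride = fb_stride;   /* may be negative to flip the image */
	xfer.to_fb = 1;               /* system memory -> frame buffer, queued on engine 0 */

	/* Queue the blit; retry if a signal interrupted the wait for a free slot. */
	do {
		ret = drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT, &xfer, sizeof(xfer));
	} while (ret == -EAGAIN);
	if (ret)
		return ret;

	/* Wait for completion using the handle and engine the driver filled in. */
	memset(&sync, 0, sizeof(sync));
	sync.sync_handle = xfer.sync.sync_handle;
	sync.engine = xfer.sync.engine;
	do {
		ret = drmCommandWrite(fd, DRM_VIA_BLIT_SYNC, &sync, sizeof(sync));
	} while (ret == -EAGAIN);

	return ret;
}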
diff --git a/drivers/char/drm/via_dmablit.h b/drivers/char/drm/via_dmablit.h
new file mode 100644
index 000000000000..6486391746b1
--- /dev/null
+++ b/drivers/char/drm/via_dmablit.h
@@ -0,0 +1,138 @@
1 | /* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro | ||
2 | * | ||
3 | * Copyright 2005 Thomas Hellstrom. | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
21 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
22 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
23 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | * Authors: | ||
26 | * Thomas Hellstrom. | ||
27 | * Register info from Digeo Inc. | ||
28 | */ | ||
29 | |||
30 | #ifndef _VIA_DMABLIT_H | ||
31 | #define _VIA_DMABLIT_H | ||
32 | |||
33 | #define VIA_NUM_BLIT_ENGINES 2 | ||
34 | #define VIA_NUM_BLIT_SLOTS 8 | ||
35 | |||
36 | struct _drm_via_descriptor; | ||
37 | |||
38 | typedef struct _drm_via_sg_info { | ||
39 | struct page **pages; | ||
40 | unsigned long num_pages; | ||
41 | struct _drm_via_descriptor **desc_pages; | ||
42 | int num_desc_pages; | ||
43 | int num_desc; | ||
44 | enum dma_data_direction direction; | ||
45 | unsigned char *bounce_buffer; | ||
46 | dma_addr_t chain_start; | ||
47 | uint32_t free_on_sequence; | ||
48 | unsigned int descriptors_per_page; | ||
49 | int aborted; | ||
50 | enum { | ||
51 | dr_via_device_mapped, | ||
52 | dr_via_desc_pages_alloc, | ||
53 | dr_via_pages_locked, | ||
54 | dr_via_pages_alloc, | ||
55 | dr_via_sg_init | ||
56 | } state; | ||
57 | } drm_via_sg_info_t; | ||
58 | |||
59 | typedef struct _drm_via_blitq { | ||
60 | drm_device_t *dev; | ||
61 | uint32_t cur_blit_handle; | ||
62 | uint32_t done_blit_handle; | ||
63 | unsigned serviced; | ||
64 | unsigned head; | ||
65 | unsigned cur; | ||
66 | unsigned num_free; | ||
67 | unsigned num_outstanding; | ||
68 | unsigned long end; | ||
69 | int aborting; | ||
70 | int is_active; | ||
71 | drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS]; | ||
72 | spinlock_t blit_lock; | ||
73 | wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS]; | ||
74 | wait_queue_head_t busy_queue; | ||
75 | struct work_struct wq; | ||
76 | struct timer_list poll_timer; | ||
77 | } drm_via_blitq_t; | ||
78 | |||
79 | |||
80 | /* | ||
81 | * PCI DMA Registers | ||
82 | * Channels 2 & 3 don't seem to be implemented in hardware. | ||
83 | */ | ||
84 | |||
85 | #define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */ | ||
86 | #define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */ | ||
87 | #define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */ | ||
88 | #define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */ | ||
89 | |||
90 | #define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */ | ||
91 | #define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */ | ||
92 | #define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */ | ||
93 | #define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */ | ||
94 | |||
95 | #define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */ | ||
96 | #define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */ | ||
97 | #define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */ | ||
98 | #define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */ | ||
99 | |||
100 | #define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */ | ||
101 | #define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */ | ||
102 | #define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */ | ||
103 | #define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */ | ||
104 | |||
105 | #define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */ | ||
106 | #define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */ | ||
107 | #define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */ | ||
108 | #define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */ | ||
109 | |||
110 | #define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */ | ||
111 | #define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */ | ||
112 | #define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */ | ||
113 | #define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */ | ||
114 | |||
115 | #define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */ | ||
116 | |||
117 | /* Define for DMA engine */ | ||
118 | /* DPR */ | ||
119 | #define VIA_DMA_DPR_EC (1<<1) /* end of chain */ | ||
120 | #define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */ | ||
121 | #define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */ | ||
122 | |||
123 | /* MR */ | ||
124 | #define VIA_DMA_MR_CM (1<<0) /* chaining mode */ | ||
125 | #define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */ | ||
126 | #define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */ | ||
127 | |||
128 | /* CSR */ | ||
129 | #define VIA_DMA_CSR_DE (1<<0) /* DMA enable */ | ||
130 | #define VIA_DMA_CSR_TS (1<<1) /* transfer start */ | ||
131 | #define VIA_DMA_CSR_TA (1<<2) /* transfer abort */ | ||
132 | #define VIA_DMA_CSR_TD (1<<3) /* transfer done */ | ||
133 | #define VIA_DMA_CSR_DD (1<<4) /* descriptor done */ | ||
134 | #define VIA_DMA_DPR_EC (1<<1) /* end of chain */ | ||
135 | |||
136 | |||
137 | |||
138 | #endif | ||
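Not part of the patch: to make the channel layout above concrete, here is a small sketch of the per-engine register addressing that via_fire_dmablit() in via_dmablit.c relies on. Each channel's MAR/DAR/BCR/DPR block is spaced 0x10 apart, while the MR and CSR banks advance 0x04 per channel. The helper and struct names are hypothetical; the offsets are copied from the table above.

/* Hypothetical sketch -- not part of this commit. Offsets copied from the table above. */
#include <stdint.h>

#define VIA_PCI_DMA_MAR0 0xE40
#define VIA_PCI_DMA_DAR0 0xE44
#define VIA_PCI_DMA_BCR0 0xE48
#define VIA_PCI_DMA_DPR0 0xE4C
#define VIA_PCI_DMA_MR0  0xE80
#define VIA_PCI_DMA_CSR0 0xE90

struct via_dma_engine_regs {
	uint32_t mar, dar, bcr, dpr;	/* per-channel register block, 0x10 apart */
	uint32_t mr, csr;		/* packed banks, 0x04 per channel */
};

/* Mirrors the arithmetic in via_fire_dmablit(): engine*0x10 vs. engine*0x04. */
static inline struct via_dma_engine_regs via_dma_regs_for_engine(int engine)
{
	struct via_dma_engine_regs regs = {
		.mar = VIA_PCI_DMA_MAR0 + engine * 0x10,	/* 0xE40, 0xE50 */
		.dar = VIA_PCI_DMA_DAR0 + engine * 0x10,	/* 0xE44, 0xE54 */
		.bcr = VIA_PCI_DMA_BCR0 + engine * 0x10,	/* 0xE48, 0xE58 */
		.dpr = VIA_PCI_DMA_DPR0 + engine * 0x10,	/* 0xE4C, 0xE5C */
		.mr  = VIA_PCI_DMA_MR0  + engine * 0x04,	/* 0xE80, 0xE84 */
		.csr = VIA_PCI_DMA_CSR0 + engine * 0x04,	/* 0xE90, 0xE94 */
	};
	return regs;
}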
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
index 556d80722fd0..47f0b5b26379 100644
--- a/drivers/char/drm/via_drm.h
+++ b/drivers/char/drm/via_drm.h
@@ -200,7 +200,7 @@ typedef struct _drm_via_sarea {
 	unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
 	unsigned int XvMCCtxNoGrabbed;	/* Last context to hold decoder */
 
-	/* Used bt the 3d driver only at this point, for pageflipping:
+	/* Used by the 3d driver only at this point, for pageflipping:
 	 */
 	unsigned int pfCurrentOffset;
 } drm_via_sarea_t;
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index 0606c752dccb..aad4f99f5405 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -28,11 +28,11 @@
 
 #define DRIVER_NAME		"via"
 #define DRIVER_DESC		"VIA Unichrome / Pro"
-#define DRIVER_DATE		"20051022"
+#define DRIVER_DATE		"20051116"
 
 #define DRIVER_MAJOR		2
 #define DRIVER_MINOR		7
-#define DRIVER_PATCHLEVEL	2
+#define DRIVER_PATCHLEVEL	4
 
 #include "via_verifier.h"
 
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index 62e692556a1d..33e0cb12e4c3 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -42,7 +42,7 @@ static int via_agp_free(drm_via_mem_t * mem);
 static int via_fb_alloc(drm_via_mem_t * mem);
 static int via_fb_free(drm_via_mem_t * mem);
 
-static int add_alloc_set(int context, int type, unsigned int val)
+static int add_alloc_set(int context, int type, unsigned long val)
 {
 	int i, retval = 0;
 
@@ -56,7 +56,7 @@ static int add_alloc_set(int context, int type, unsigned int val)
 	return retval;
 }
 
-static int del_alloc_set(int context, int type, unsigned int val)
+static int del_alloc_set(int context, int type, unsigned long val)
 {
 	int i, retval = 0;
 