author    Dave Airlie <airlied@starflyer.(none)>    2006-01-01 22:26:20 -0500
committer Dave Airlie <airlied@linux.ie>    2006-01-01 22:26:20 -0500
commit    443448d05468277abe99c9b24b9df538dd840f35 (patch)
tree      bea36178d2ff1e88be9086c95be6c023f9a67f83 /drivers/char/drm/via_dmablit.c
parent    a7a2cc315c8a5e51b08538d102ec3229c966ac87 (diff)
drm: via driver update to CVS version
This updates the DRM via driver to the latest CVS version, which contains support for DMA blitting. It also contains some whitespace and other minor fixes.

From: Thomas Hellstrom <unichrome@shipmail.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>
Diffstat (limited to 'drivers/char/drm/via_dmablit.c')
-rw-r--r--    drivers/char/drm/via_dmablit.c    805
1 files changed, 805 insertions, 0 deletions
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
new file mode 100644
index 000000000000..9d5e027dae0e
--- /dev/null
+++ b/drivers/char/drm/via_dmablit.c
@@ -0,0 +1,805 @@
1/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
2 *
3 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Thomas Hellstrom.
26 * Partially based on code obtained from Digeo Inc.
27 */
28
29
30/*
31 * Unmaps the DMA mappings.
32 * FIXME: Is this a NoOp on x86? Also
33 * FIXME: What happens if this one is called and a pending blit has previously done
34 * the same DMA mappings?
35 */
36
37#include "drmP.h"
38#include "via_drm.h"
39#include "via_drv.h"
40#include "via_dmablit.h"
41
42#include <linux/pagemap.h>
43
44#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
45#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
46#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
47
48typedef struct _drm_via_descriptor {
49 uint32_t mem_addr;
50 uint32_t dev_addr;
51 uint32_t size;
52 uint32_t next;
53} drm_via_descriptor_t;
54
55
56/*
57 * Unmap a DMA mapping.
58 */
59
60
61
62static void
63via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
64{
65 int num_desc = vsg->num_desc;
66 unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
67 unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
68 drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
69 descriptor_this_page;
70 dma_addr_t next = vsg->chain_start;
71
72 while(num_desc--) {
73 if (descriptor_this_page-- == 0) {
74 cur_descriptor_page--;
75 descriptor_this_page = vsg->descriptors_per_page - 1;
76 desc_ptr = vsg->desc_pages[cur_descriptor_page] +
77 descriptor_this_page;
78 }
79 dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
80 dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
81 next = (dma_addr_t) desc_ptr->next;
82 desc_ptr--;
83 }
84}
85
86/*
87 * If mode = 0, count how many descriptors are needed.
88 * If mode = 1, map the DMA pages for the device and also build and map the descriptors.
89 * Descriptors are run in reverse order by the hardware, because we are not allowed to update the
90 * 'next' field without syncing calls once the descriptor has already been mapped.
91 */
92
93static void
94via_map_blit_for_device(struct pci_dev *pdev,
95 const drm_via_dmablit_t *xfer,
96 drm_via_sg_info_t *vsg,
97 int mode)
98{
99 unsigned cur_descriptor_page = 0;
100 unsigned num_descriptors_this_page = 0;
101 unsigned char *mem_addr = xfer->mem_addr;
102 unsigned char *cur_mem;
103 unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
104 uint32_t fb_addr = xfer->fb_addr;
105 uint32_t cur_fb;
106 unsigned long line_len;
107 unsigned remaining_len;
108 int num_desc = 0;
109 int cur_line;
110 dma_addr_t next = 0 | VIA_DMA_DPR_EC;
111 drm_via_descriptor_t *desc_ptr = 0;
112
113 if (mode == 1)
114 desc_ptr = vsg->desc_pages[cur_descriptor_page];
115
116 for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
117
118 line_len = xfer->line_length;
119 cur_fb = fb_addr;
120 cur_mem = mem_addr;
121
122 while (line_len > 0) {
123
124 remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
125 line_len -= remaining_len;
126
127 if (mode == 1) {
128 desc_ptr->mem_addr =
129 dma_map_page(&pdev->dev,
130 vsg->pages[VIA_PFN(cur_mem) -
131 VIA_PFN(first_addr)],
132 VIA_PGOFF(cur_mem), remaining_len,
133 vsg->direction);
134 desc_ptr->dev_addr = cur_fb;
135
136 desc_ptr->size = remaining_len;
137 desc_ptr->next = (uint32_t) next;
138 next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
139 DMA_TO_DEVICE);
140 desc_ptr++;
141 if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
142 num_descriptors_this_page = 0;
143 desc_ptr = vsg->desc_pages[++cur_descriptor_page];
144 }
145 }
146
147 num_desc++;
148 cur_mem += remaining_len;
149 cur_fb += remaining_len;
150 }
151
152 mem_addr += xfer->mem_stride;
153 fb_addr += xfer->fb_stride;
154 }
155
156 if (mode == 1) {
157 vsg->chain_start = next;
158 vsg->state = dr_via_device_mapped;
159 }
160 vsg->num_desc = num_desc;
161}
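
To make the layout that via_map_blit_for_device() produces easier to picture, here is a purely hypothetical debug helper, not part of the driver, sketched only against the structures used in this file. It walks the descriptors in build order through the CPU-side desc_pages[] array; the hardware instead starts at vsg->chain_start and follows each descriptor's 'next' DMA address, i.e. the reverse of build order, stopping at the VIA_DMA_DPR_EC flag stored in the first descriptor built.

	/*
	 * Illustrative only: dump the blit descriptor chain in build order.
	 * Only meaningful once the mode == 1 pass above has run.
	 */
	static void via_dump_desc_chain(const drm_via_sg_info_t *vsg)
	{
		int i;

		for (i = 0; i < vsg->num_desc; ++i) {
			const drm_via_descriptor_t *d =
				vsg->desc_pages[i / vsg->descriptors_per_page] +
				(i % vsg->descriptors_per_page);

			DRM_DEBUG("desc %d: mem 0x%08x dev 0x%08x size %u next 0x%08x\n",
				  i, d->mem_addr, d->dev_addr, d->size, d->next);
		}
	}
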
162
163/*
164 * Function that frees up all resources for a blit. It is usable even if the
165 * blit info has only been partially built, as long as the status enum is consistent
166 * with the actual status of the used resources.
167 */
168
169
170void
171via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
172{
173 struct page *page;
174 int i;
175
176 switch(vsg->state) {
177 case dr_via_device_mapped:
178 via_unmap_blit_from_device(pdev, vsg);
179 case dr_via_desc_pages_alloc:
180 for (i=0; i<vsg->num_desc_pages; ++i) {
181 if (vsg->desc_pages[i] != NULL)
182 free_page((unsigned long)vsg->desc_pages[i]);
183 }
184 kfree(vsg->desc_pages);
185 case dr_via_pages_locked:
186 for (i=0; i<vsg->num_pages; ++i) {
187 if ( NULL != (page = vsg->pages[i])) {
188 if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
189 SetPageDirty(page);
190 page_cache_release(page);
191 }
192 }
193 case dr_via_pages_alloc:
194 vfree(vsg->pages);
195 default:
196 vsg->state = dr_via_sg_init;
197 }
198 if (vsg->bounce_buffer) {
199 vfree(vsg->bounce_buffer);
200 vsg->bounce_buffer = NULL;
201 }
202 vsg->free_on_sequence = 0;
203}
204
205/*
206 * Fire a blit engine.
207 */
208
209static void
210via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
211{
212 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
213
214 VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
215 VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
216 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
217 VIA_DMA_CSR_DE);
218 VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
219 VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
220 VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
221 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
222}
223
224/*
225 * Obtain a page pointer array and lock all pages into system memory. The call
226 * fails with an error if the calling user does not have access to the submitted address.
227 */
228
229static int
230via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
231{
232 int ret;
233 unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
234 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
235 first_pfn + 1;
236
237 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
238 return DRM_ERR(ENOMEM);
239 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
240 down_read(&current->mm->mmap_sem);
241 ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
242 vsg->num_pages, vsg->direction, 0, vsg->pages, NULL);
243
244 up_read(&current->mm->mmap_sem);
245 if (ret != vsg->num_pages) {
246 if (ret < 0)
247 return ret;
248 vsg->state = dr_via_pages_locked;
249 return DRM_ERR(EINVAL);
250 }
251 vsg->state = dr_via_pages_locked;
252 DRM_DEBUG("DMA pages locked\n");
253 return 0;
254}
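
A worked example of the page-count arithmetic above, with purely illustrative numbers and assuming 4 KiB pages: for num_lines = 480 and mem_stride = 2048 the transfer spans 480 * 2048 = 983040 bytes; if mem_addr begins 256 bytes into its page, the last byte lies at offset 256 + 983040 - 1 = 983295 from the start of that page, which is page 983295 / 4096 = 240 relative to the first, so num_pages = 240 + 1 = 241. The 256-byte misalignment is what pushes the transfer into a 241st page.
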
255
256/*
257 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
258 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
259 * quite large for some blits, and pages don't need to be contiguous.
260 */
261
262static int
263via_alloc_desc_pages(drm_via_sg_info_t *vsg)
264{
265 int i;
266
267 vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
268 vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
269 vsg->descriptors_per_page;
270
271 if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
272 return DRM_ERR(ENOMEM);
273
274 memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
275 vsg->state = dr_via_desc_pages_alloc;
276 for (i=0; i<vsg->num_desc_pages; ++i) {
277 if (NULL == (vsg->desc_pages[i] =
278 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
279 return DRM_ERR(ENOMEM);
280 }
281 DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
282 vsg->num_desc);
283 return 0;
284}
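
For scale, and assuming 4 KiB pages: drm_via_descriptor_t is four 32-bit words, i.e. 16 bytes, so descriptors_per_page = 4096 / 16 = 256. A blit that the mode-0 counting pass found to need, say, num_desc = 600 descriptors therefore gets num_desc_pages = (600 + 255) / 256 = 3 descriptor pages.
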
285
286static void
287via_abort_dmablit(drm_device_t *dev, int engine)
288{
289 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
290
291 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
292}
293
294static void
295via_dmablit_engine_off(drm_device_t *dev, int engine)
296{
297 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
298
299 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
300}
301
302
303
304/*
305 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
306 * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
307 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
308 * the workqueue task takes care of processing associated with the old blit.
309 */
310
311void
312via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
313{
314 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
315 drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
316 int cur;
317 int done_transfer;
318 unsigned long irqsave=0;
319 uint32_t status = 0;
320
321 DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
322 engine, from_irq, (unsigned long) blitq);
323
324 if (from_irq) {
325 spin_lock(&blitq->blit_lock);
326 } else {
327 spin_lock_irqsave(&blitq->blit_lock, irqsave);
328 }
329
330 done_transfer = blitq->is_active &&
331 (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
332 done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
333
334 cur = blitq->cur;
335 if (done_transfer) {
336
337 blitq->blits[cur]->aborted = blitq->aborting;
338 blitq->done_blit_handle++;
339 DRM_WAKEUP(blitq->blit_queue + cur);
340
341 cur++;
342 if (cur >= VIA_NUM_BLIT_SLOTS)
343 cur = 0;
344 blitq->cur = cur;
345
346 /*
347 * Clear transfer done flag.
348 */
349
350 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
351
352 blitq->is_active = 0;
353 blitq->aborting = 0;
354 schedule_work(&blitq->wq);
355
356 } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
357
358 /*
359 * Abort transfer after one second.
360 */
361
362 via_abort_dmablit(dev, engine);
363 blitq->aborting = 1;
364 blitq->end = jiffies + DRM_HZ;
365 }
366
367 if (!blitq->is_active) {
368 if (blitq->num_outstanding) {
369 via_fire_dmablit(dev, blitq->blits[cur], engine);
370 blitq->is_active = 1;
371 blitq->cur = cur;
372 blitq->num_outstanding--;
373 blitq->end = jiffies + DRM_HZ;
374 if (!timer_pending(&blitq->poll_timer)) {
375 blitq->poll_timer.expires = jiffies+1;
376 add_timer(&blitq->poll_timer);
377 }
378 } else {
379 if (timer_pending(&blitq->poll_timer)) {
380 del_timer(&blitq->poll_timer);
381 }
382 via_dmablit_engine_off(dev, engine);
383 }
384 }
385
386 if (from_irq) {
387 spin_unlock(&blitq->blit_lock);
388 } else {
389 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
390 }
391}
392
393
394
395/*
396 * Check whether this blit is still active, performing necessary locking.
397 */
398
399static int
400via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
401{
402 unsigned long irqsave;
403 uint32_t slot;
404 int active;
405
406 spin_lock_irqsave(&blitq->blit_lock, irqsave);
407
408 /*
409 * Allow for handle wraparounds.
410 */
411
412 active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
413 ((blitq->cur_blit_handle - handle) <= (1 << 23));
414
415 if (queue && active) {
416 slot = handle - blitq->done_blit_handle + blitq->cur -1;
417 if (slot >= VIA_NUM_BLIT_SLOTS) {
418 slot -= VIA_NUM_BLIT_SLOTS;
419 }
420 *queue = blitq->blit_queue + slot;
421 }
422
423 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
424
425 return active;
426}
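
A worked example of the wraparound test, with illustrative 32-bit handle values: take done_blit_handle = 0xfffffff0 and cur_blit_handle = 0x00000005. For a recently queued handle = 0x00000002, done_blit_handle - handle = 0xffffffee, which exceeds 1 << 23, and cur_blit_handle - handle = 3, which does not, so the blit is reported active. For a long-completed handle = 0xffffffe0, done_blit_handle - handle = 0x10 fails the first test, so it is reported done. In effect a handle counts as active exactly when it falls in the window (done_blit_handle, cur_blit_handle] modulo 2^32, with 1 << 23 as the assumed maximum outstanding distance.
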
427
428/*
429 * Sync. Wait for up to three seconds for the blit to be performed.
430 */
431
432static int
433via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine)
434{
435
436 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
437 drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
438 wait_queue_head_t *queue;
439 int ret = 0;
440
441 if (via_dmablit_active(blitq, engine, handle, &queue)) {
442 DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
443 !via_dmablit_active(blitq, engine, handle, NULL));
444 }
445 DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
446 handle, engine, ret);
447
448 return ret;
449}
450
451
452/*
453 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
454 * a) Broken hardware (typically boards without any video-capture facility).
455 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
456 * The timer and hardware IRQs can and do work in parallel. If the hardware has
457 * working IRQs, they will shorten the latency somewhat.
458 */
459
460
461
462static void
463via_dmablit_timer(unsigned long data)
464{
465 drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
466 drm_device_t *dev = blitq->dev;
467 int engine = (int)
468 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
469
470 DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
471 (unsigned long) jiffies);
472
473 via_dmablit_handler(dev, engine, 0);
474
475 if (!timer_pending(&blitq->poll_timer)) {
476 blitq->poll_timer.expires = jiffies+1;
477 add_timer(&blitq->poll_timer);
478 }
479 via_dmablit_handler(dev, engine, 0);
480
481}
482
483
484
485
486/*
487 * Workqueue task that frees data and mappings associated with a blit.
488 * Also wakes up waiting processes. Each of these tasks handles one
489 * blit engine only and is not necessarily run for each interrupt.
490 */
491
492
493static void
494via_dmablit_workqueue(void *data)
495{
496 drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
497 drm_device_t *dev = blitq->dev;
498 unsigned long irqsave;
499 drm_via_sg_info_t *cur_sg;
500 int cur_released;
501
502
503 DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
504 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
505
506 spin_lock_irqsave(&blitq->blit_lock, irqsave);
507
508 while(blitq->serviced != blitq->cur) {
509
510 cur_released = blitq->serviced++;
511
512 DRM_DEBUG("Releasing blit slot %d\n", cur_released);
513
514 if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
515 blitq->serviced = 0;
516
517 cur_sg = blitq->blits[cur_released];
518 blitq->num_free++;
519
520 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
521
522 DRM_WAKEUP(&blitq->busy_queue);
523
524 via_free_sg_info(dev->pdev, cur_sg);
525 kfree(cur_sg);
526
527 spin_lock_irqsave(&blitq->blit_lock, irqsave);
528 }
529
530 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
531}
532
533
534/*
535 * Init all blit engines. Currently we use two, but some hardware has four.
536 */
537
538
539void
540via_init_dmablit(drm_device_t *dev)
541{
542 int i,j;
543 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
544 drm_via_blitq_t *blitq;
545
546 pci_set_master(dev->pdev);
547
548 for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
549 blitq = dev_priv->blit_queues + i;
550 blitq->dev = dev;
551 blitq->cur_blit_handle = 0;
552 blitq->done_blit_handle = 0;
553 blitq->head = 0;
554 blitq->cur = 0;
555 blitq->serviced = 0;
556 blitq->num_free = VIA_NUM_BLIT_SLOTS;
557 blitq->num_outstanding = 0;
558 blitq->is_active = 0;
559 blitq->aborting = 0;
560 blitq->blit_lock = SPIN_LOCK_UNLOCKED;
561 for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
562 DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
563 }
564 DRM_INIT_WAITQUEUE(&blitq->busy_queue);
565 INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
566 init_timer(&blitq->poll_timer);
567 blitq->poll_timer.function = &via_dmablit_timer;
568 blitq->poll_timer.data = (unsigned long) blitq;
569 }
570}
571
572/*
573 * Build all info and do all mappings required for a blit.
574 */
575
576
577static int
578via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
579{
580 int draw = xfer->to_fb;
581 int ret = 0;
582
583 vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
584 vsg->bounce_buffer = 0;
585
586 vsg->state = dr_via_sg_init;
587
588 if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
589 DRM_ERROR("Zero size bitblt.\n");
590 return DRM_ERR(EINVAL);
591 }
592
593 /*
594 * The check below is a driver limitation, not a hardware one. We
595 * don't want to lock unused pages, and don't want to incorporate the
596 * extra logic of avoiding them. Make sure there are none.
597 * (Not a big limitation anyway.)
598 */
599
600 if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) ||
601 (xfer->mem_stride > 2048*4)) {
602 DRM_ERROR("Too large system memory stride. Stride: %d, "
603 "Length: %d\n", xfer->mem_stride, xfer->line_length);
604 return DRM_ERR(EINVAL);
605 }
606
607 if (xfer->num_lines > 2048) {
608 DRM_ERROR("Too many PCI DMA bitblt lines.\n");
609 return DRM_ERR(EINVAL);
610 }
611
612 /*
613 * We allow a negative fb stride so that images can be flipped during the
614 * transfer.
615 */
616
617 if (xfer->mem_stride < xfer->line_length ||
618 abs(xfer->fb_stride) < xfer->line_length) {
619 DRM_ERROR("Invalid frame-buffer / memory stride.\n");
620 return DRM_ERR(EINVAL);
621 }
622
623 /*
624 * A hardware bug seems to be worked around if system memory addresses start on
625 * 16-byte boundaries. This seems a bit restrictive, however. VIA has been contacted
626 * about this. Meanwhile, impose the following restrictions:
627 */
628
629#ifdef VIA_BUGFREE
630 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
631 ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) {
632 DRM_ERROR("Invalid DRM bitblt alignment.\n");
633 return DRM_ERR(EINVAL);
634 }
635#else
636 if ((((unsigned long)xfer->mem_addr & 15) ||
637 ((unsigned long)xfer->fb_addr & 3)) || (xfer->mem_stride & 15) ||
638 (xfer->fb_stride & 3)) {
639 DRM_ERROR("Invalid DRM bitblt alignment.\n");
640 return DRM_ERR(EINVAL);
641 }
642#endif
643
644 if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
645 DRM_ERROR("Could not lock DMA pages.\n");
646 via_free_sg_info(dev->pdev, vsg);
647 return ret;
648 }
649
650 via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
651 if (0 != (ret = via_alloc_desc_pages(vsg))) {
652 DRM_ERROR("Could not allocate DMA descriptor pages.\n");
653 via_free_sg_info(dev->pdev, vsg);
654 return ret;
655 }
656 via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
657
658 return 0;
659}
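
An illustrative parameter set that passes all of the checks above (hypothetical values): line_length = 2560 (for example 1280 pixels at 16 bpp), mem_stride = fb_stride = 2560, num_lines = 1024, mem_addr 16-byte aligned and fb_addr 4-byte aligned. Then mem_stride - line_length = 0 < PAGE_SIZE, mem_stride <= 2048 * 4, num_lines <= 2048, both strides are at least line_length, mem_stride is a multiple of 16 and fb_stride a multiple of 4, so the function proceeds to lock the pages and run the two mapping passes.
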
660
661
662/*
663 * Reserve one free slot in the blit queue. Will wait for one second for one
664 * to become available. Otherwise -EBUSY is returned.
665 */
666
667static int
668via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
669{
670 int ret=0;
671 unsigned long irqsave;
672
673 DRM_DEBUG("Num free is %d\n", blitq->num_free);
674 spin_lock_irqsave(&blitq->blit_lock, irqsave);
675 while(blitq->num_free == 0) {
676 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
677
678 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
679 if (ret) {
680 return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
681 }
682
683 spin_lock_irqsave(&blitq->blit_lock, irqsave);
684 }
685
686 blitq->num_free--;
687 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
688
689 return 0;
690}
691
692/*
693 * Hand back a free slot if we changed our mind.
694 */
695
696static void
697via_dmablit_release_slot(drm_via_blitq_t *blitq)
698{
699 unsigned long irqsave;
700
701 spin_lock_irqsave(&blitq->blit_lock, irqsave);
702 blitq->num_free++;
703 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
704 DRM_WAKEUP( &blitq->busy_queue );
705}
706
707/*
708 * Grab a free slot. Build blit info and queue a blit.
709 */
710
711
712static int
713via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
714{
715 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
716 drm_via_sg_info_t *vsg;
717 drm_via_blitq_t *blitq;
718 int ret;
719 int engine;
720 unsigned long irqsave;
721
722 if (dev_priv == NULL) {
723 DRM_ERROR("Called without initialization.\n");
724 return DRM_ERR(EINVAL);
725 }
726
727 engine = (xfer->to_fb) ? 0 : 1;
728 blitq = dev_priv->blit_queues + engine;
729 if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
730 return ret;
731 }
732 if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
733 via_dmablit_release_slot(blitq);
734 return DRM_ERR(ENOMEM);
735 }
736 if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
737 via_dmablit_release_slot(blitq);
738 kfree(vsg);
739 return ret;
740 }
741 spin_lock_irqsave(&blitq->blit_lock, irqsave);
742
743 blitq->blits[blitq->head++] = vsg;
744 if (blitq->head >= VIA_NUM_BLIT_SLOTS)
745 blitq->head = 0;
746 blitq->num_outstanding++;
747 xfer->sync.sync_handle = ++blitq->cur_blit_handle;
748
749 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
750 xfer->sync.engine = engine;
751
752 via_dmablit_handler(dev, engine, 0);
753
754 return 0;
755}
756
757/*
758 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
759 * that there is a very high probability that this IOCTL will be interrupted by a signal. In that
760 * case it returns with -EAGAIN for the signal to be delivered.
761 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
762 */
763
764int
765via_dma_blit_sync( DRM_IOCTL_ARGS )
766{
767 drm_via_blitsync_t sync;
768 int err;
769 DRM_DEVICE;
770
771 DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));
772
773 if (sync.engine >= VIA_NUM_BLIT_ENGINES)
774 return DRM_ERR(EINVAL);
775
776 err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);
777
778 if (DRM_ERR(EINTR) == err)
779 err = DRM_ERR(EAGAIN);
780
781 return err;
782}
783
784
785/*
786 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
787 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
788 * be reissued. See the above IOCTL code.
789 */
790
791int
792via_dma_blit( DRM_IOCTL_ARGS )
793{
794 drm_via_dmablit_t xfer;
795 int err;
796 DRM_DEVICE;
797
798 DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));
799
800 err = via_dmablit(dev, &xfer);
801
802 DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));
803
804 return err;
805}
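
For context, here is a minimal user-space sketch of the intended call flow. It is an assumption-laden illustration rather than code from this patch: the drm_via_dmablit_t / drm_via_blitsync_t fields come from the via_drm.h used by this file, while the DRM_VIA_DMA_BLIT and DRM_VIA_BLIT_SYNC command indices and the drmCommandWriteRead()/drmCommandWrite() helpers are assumed to come from the matching via_drm.h and libdrm of the same era.

	/*
	 * Hypothetical user-space usage sketch (not part of this patch).
	 * Queue a blit from system memory to the frame buffer, then sync on it.
	 */
	#include <string.h>
	#include <stdint.h>
	#include <xf86drm.h>          /* drmCommandWrite(), drmCommandWriteRead() */
	#include "via_drm.h"          /* drm_via_dmablit_t, DRM_VIA_DMA_BLIT, ... */

	static int blit_and_wait(int fd, unsigned char *src, uint32_t fb_offset)
	{
		drm_via_dmablit_t xfer;
		drm_via_blitsync_t sync;
		int ret;

		memset(&xfer, 0, sizeof(xfer));
		xfer.to_fb = 1;              /* system memory -> frame buffer */
		xfer.mem_addr = src;         /* 16-byte aligned (see via_build_sg_info) */
		xfer.mem_stride = 2560;      /* multiple of 16, >= line_length */
		xfer.fb_addr = fb_offset;    /* 4-byte aligned offset into the FB */
		xfer.fb_stride = 2560;       /* multiple of 4 */
		xfer.line_length = 2560;
		xfer.num_lines = 1024;       /* <= 2048 */

		do {                         /* -EAGAIN: no free slot yet, reissue */
			ret = drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT,
						  &xfer, sizeof(xfer));
		} while (ret == -EAGAIN);
		if (ret)
			return ret;

		sync = xfer.sync;            /* handle + engine filled in by the driver */
		do {                         /* -EAGAIN: interrupted by a signal, reissue */
			ret = drmCommandWrite(fd, DRM_VIA_BLIT_SYNC,
					      &sync, sizeof(sync));
		} while (ret == -EAGAIN);
		return ret;
	}

Both ioctls report an interrupting signal as -EAGAIN, which is why the retry loops are there; the blit ioctl also fills in xfer.sync with the handle and engine that the subsequent sync ioctl expects.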