path: root/drivers/char/drm/via_dmablit.c
author	Dave Airlie <airlied@linux.ie>	2007-11-04 21:50:58 -0500
committer	Dave Airlie <airlied@redhat.com>	2008-02-07 00:09:38 -0500
commit	bc5f4523f772cc7629c5c5a46cf4f2a07a5500b8 (patch)
tree	8fa2f5194bb05d7e789e5d24a0fe3a7456568146 /drivers/char/drm/via_dmablit.c
parent	8562b3f25d6e23c9d9e48a32672944d1e8a2aa97 (diff)
drm: run cleanfile across drm tree
Signed-off-by: Dave Airlie <airlied@linux.ie>
Diffstat (limited to 'drivers/char/drm/via_dmablit.c')
-rw-r--r--	drivers/char/drm/via_dmablit.c | 184
1 file changed, 92 insertions(+), 92 deletions(-)
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index c6fd16f3cb43..33c5197b73c4 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -1,5 +1,5 @@
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -16,22 +16,22 @@
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */


/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

#include "drmP.h"
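[Annotation] The FIXME above concerns the streaming-DMA API. A minimal sketch, not taken from this driver, of the pairing it refers to: every dma_map_single() must be matched by a dma_unmap_single() with the same handle, size and direction. On cache-coherent x86 the unmap is typically close to a no-op, which is what the first FIXME is asking about.

#include <linux/dma-mapping.h>

static void example_one_mapping(struct device *dev, void *buf, size_t len)
{
	/* Hand 'buf' to the device for a device-bound transfer. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program the device with 'handle' and wait for completion ... */

	/* Matching unmap; size and direction must agree with the map. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}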
@@ -65,7 +65,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

@@ -73,7 +73,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
@@ -93,7 +93,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
static void
via_map_blit_for_device(struct pci_dev *pdev,
		   const drm_via_dmablit_t *xfer,
		   drm_via_sg_info_t *vsg,
		   int mode)
{
	unsigned cur_descriptor_page = 0;
@@ -110,7 +110,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
@@ -118,24 +118,24 @@ via_map_blit_for_device(struct pci_dev *pdev,
		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
@@ -143,12 +143,12 @@ via_map_blit_for_device(struct pci_dev *pdev,
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}
@@ -161,14 +161,14 @@ via_map_blit_for_device(struct pci_dev *pdev,
}

/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built as long as the status enum is consistent
 * with the actual status of the used resources.
 */


static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;
@@ -185,7 +185,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
	case dr_via_pages_locked:
		for (i=0; i<vsg->num_pages; ++i) {
			if ( NULL != (page = vsg->pages[i])) {
				if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
@@ -200,7 +200,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
		vsg->bounce_buffer = NULL;
	}
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
@@ -213,7 +213,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)

	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
@@ -233,9 +233,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
		first_pfn + 1;

	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return -ENOMEM;
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
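[Annotation] The num_pages expression above counts the pages spanned between the first and last byte of the transfer, not just its length. A worked example with hypothetical numbers, PAGE_SHIFT assumed to be 12:

	unsigned long mem_addr  = 0x10000ff0UL;          /* hypothetical user address, 16 bytes before a page boundary */
	unsigned long last_byte = mem_addr + 2 * 32 - 1; /* 2 lines, 32-byte stride -> 0x1000102f */
	unsigned long num_pages = (last_byte >> 12) - (mem_addr >> 12) + 1; /* == 2 pages for a 64-byte transfer */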
@@ -248,7 +248,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
@@ -264,21 +264,21 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 * quite large for some blits, and pages don't need to be contingous.
 */

static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i=0; i<vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
@@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
		  vsg->num_desc);
	return 0;
}

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
@@ -300,7 +300,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}


@@ -311,7 +311,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
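[Annotation] Because this handler runs both from the blit interrupt and from process context (the polling timer and the ioctl paths call it with from_irq == 0), it selects its spinlock variant on from_irq. The function body below follows this outline:

	if (from_irq)
		spin_lock(&blitq->blit_lock);		/* interrupts already off */
	else
		spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/* ... retire the finished blit and/or fire the next one ... */

	if (from_irq)
		spin_unlock(&blitq->blit_lock);
	else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);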
@@ -331,19 +331,19 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	done_transfer = blitq->is_active &&
		((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

@@ -355,7 +355,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

@@ -367,7 +367,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
@@ -383,14 +383,14 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
			}
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq) {
		spin_unlock(&blitq->blit_lock);
	} else {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	}
}


@@ -426,13 +426,13 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que

	return active;
}

/*
 * Sync. Wait for at least three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -441,12 +441,12 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}

@@ -468,12 +468,12 @@ via_dmablit_timer(unsigned long data)
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

@@ -497,7 +497,7 @@ via_dmablit_timer(unsigned long data)
 */


static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
@@ -505,38 +505,38 @@ via_dmablit_workqueue(struct work_struct *work)
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while(blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}


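[Annotation] The work item is embedded in the blit-queue structure, so the handler recovers its queue with container_of(), and the spinlock is dropped around the potentially sleeping frees. A minimal sketch of that embed-and-recover pattern, with illustrative names:

	#include <linux/workqueue.h>

	struct my_blitq {
		struct work_struct wq;
		/* ... per-queue state ... */
	};

	static void my_work_fn(struct work_struct *work)
	{
		struct my_blitq *q = container_of(work, struct my_blitq, wq);

		/* ... release finished slots for 'q'; sleeping is allowed here ... */
	}

	/* Initialization and submission:
	 *	INIT_WORK(&q->wq, my_work_fn);
	 *	schedule_work(&q->wq);
	 */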
/*
 * Init all blit engines. Currently we use two, but some hardware have 4.
@@ -550,8 +550,8 @@ via_init_dmablit(struct drm_device *dev)
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
@@ -572,20 +572,20 @@ via_init_dmablit(struct drm_device *dev)
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
			    (unsigned long)blitq);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */


static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

@@ -599,7 +599,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incoporate the
	 * extra logic of avoiding them. Make sure there are no.
	 * (Not a big limitation anyway.)
	 */

@@ -625,11 +625,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * we allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
@@ -653,11 +653,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	     ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
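[Annotation] For a userspace client this check (non-x86 path) means the system-memory address and stride must be 16-byte aligned and the framebuffer address and stride 4-byte aligned. A hedged sketch of allocating a conforming system-memory buffer; the helper name is hypothetical:

	#include <stdlib.h>

	static void *alloc_blit_buffer(size_t line_length, size_t num_lines, size_t *stride_out)
	{
		void *buf;
		size_t stride = (line_length + 15) & ~(size_t)15;	/* round stride up to 16 bytes */

		if (posix_memalign(&buf, 16, stride * num_lines))	/* 16-byte-aligned base */
			return NULL;
		*stride_out = stride;
		return buf;
	}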
@@ -673,17 +673,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}


/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret=0;
@@ -698,10 +698,10 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
		if (ret) {
			return (-EINTR == ret) ? -EAGAIN : ret;
		}

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

@@ -712,7 +712,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;
@@ -728,8 +728,8 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
 */


static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
@@ -760,15 +760,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}
@@ -776,7 +776,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
/*
 * Sync on a previously submitted blit. Note that the X server use signals extensively, and
 * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
 * case it returns with -EAGAIN for the signal to be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
 */

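[Annotation] The reissue the comment asks for looks like this from userspace — a hedged sketch, assuming drm_via_blitsync_t and DRM_VIA_BLITSYNC from via_drm.h and libdrm's drmCommandWrite():

	#include <xf86drm.h>
	#include "via_drm.h"

	static int via_blit_sync_retry(int fd, drm_via_blitsync_t *sync)
	{
		int ret;

		do {
			ret = drmCommandWrite(fd, DRM_VIA_BLITSYNC, sync, sizeof(*sync));
		} while (ret == -EAGAIN);	/* interrupted by a signal: reissue */

		return ret;
	}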
@@ -786,7 +786,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
@@ -796,15 +796,15 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri

	return err;
}


/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
 * be reissued. See the above IOCTL code.
 */

int
via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
	drm_via_dmablit_t *xfer = data;
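[Annotation] Putting the two IOCTLs together from userspace — a hedged sketch, assuming DRM_VIA_DMA_BLIT from via_drm.h and the via_blit_sync_retry() helper sketched earlier. The driver writes the sync handle back into xfer->sync, which is why the queue call is a write-read command:

	#include <xf86drm.h>
	#include "via_drm.h"

	static int via_blit_and_wait(int fd, drm_via_dmablit_t *xfer)
	{
		int ret;

		do {	/* retry while a signal interrupts the wait for a free slot */
			ret = drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT, xfer, sizeof(*xfer));
		} while (ret == -EAGAIN);
		if (ret)
			return ret;

		/* Wait on the handle the driver stored in xfer->sync. */
		return via_blit_sync_retry(fd, &xfer->sync);
	}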