path: root/drivers/gpu/drm/via/via_dmablit.c
author     Nicolas Kaiser <nikai@nikai.net>    2010-07-11 09:32:42 -0400
committer  Dave Airlie <airlied@redhat.com>    2010-08-01 20:17:23 -0400
commit     58c1e85af3645ac8df021dbf14acd215b5687f54 (patch)
tree       63ebfd92e6ff5ceceda02f0835bc2a385720f855 /drivers/gpu/drm/via/via_dmablit.c
parent     5649911316c89b6cf7963b521ec5e9287f8667a7 (diff)
drm/via: fixed coding style issues, simplified return
Fixed brace, macro and spacing coding style issues.

Simplified:
-        if (ret)
-                return ret;
-        return 0;
+        return ret;

Signed-off-by: Nicolas Kaiser <nikai@nikai.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
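Aside (not part of the patch): the "simplified return" relies on ret already holding 0 on success, so the explicit "return 0;" path is redundant. A minimal C sketch of the before/after pattern, using a hypothetical helper do_setup():

/* Hypothetical helper standing in for the real call; returns 0 or a negative errno. */
static int do_setup(void)
{
        return 0;
}

/* Before: success returns 0 explicitly even though ret is already 0. */
static int init_before(void)
{
        int ret = do_setup();

        if (ret)
                return ret;
        return 0;
}

/* After: ret holds either 0 or the error code, so it can be returned directly. */
static int init_after(void)
{
        int ret = do_setup();

        return ret;
}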
Diffstat (limited to 'drivers/gpu/drm/via/via_dmablit.c')
-rw-r--r--   drivers/gpu/drm/via/via_dmablit.c   71
1 files changed, 32 insertions, 39 deletions
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 4c54f043068e..9b5b4d9dd62c 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -70,7 +70,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
                 descriptor_this_page;
         dma_addr_t next = vsg->chain_start;

-        while(num_desc--) {
+        while (num_desc--) {
                 if (descriptor_this_page-- == 0) {
                         cur_descriptor_page--;
                         descriptor_this_page = vsg->descriptors_per_page - 1;
@@ -174,19 +174,19 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
         struct page *page;
         int i;

-        switch(vsg->state) {
+        switch (vsg->state) {
         case dr_via_device_mapped:
                 via_unmap_blit_from_device(pdev, vsg);
         case dr_via_desc_pages_alloc:
-                for (i=0; i<vsg->num_desc_pages; ++i) {
+                for (i = 0; i < vsg->num_desc_pages; ++i) {
                         if (vsg->desc_pages[i] != NULL)
                                 free_page((unsigned long)vsg->desc_pages[i]);
                 }
                 kfree(vsg->desc_pages);
         case dr_via_pages_locked:
-                for (i=0; i<vsg->num_pages; ++i) {
-                        if ( NULL != (page = vsg->pages[i])) {
-                                if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+                for (i = 0; i < vsg->num_pages; ++i) {
+                        if (NULL != (page = vsg->pages[i])) {
+                                if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
                                         SetPageDirty(page);
                                 page_cache_release(page);
                         }
@@ -232,7 +232,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 {
         int ret;
         unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
-        vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
+        vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
                 first_pfn + 1;

         if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
@@ -268,7 +268,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
 {
         int i;

-        vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
+        vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
         vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
                 vsg->descriptors_per_page;

@@ -276,7 +276,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
                 return -ENOMEM;

         vsg->state = dr_via_desc_pages_alloc;
-        for (i=0; i<vsg->num_desc_pages; ++i) {
+        for (i = 0; i < vsg->num_desc_pages; ++i) {
                 if (NULL == (vsg->desc_pages[i] =
                         (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
                         return -ENOMEM;
@@ -318,21 +318,20 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
         drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
         int cur;
         int done_transfer;
-        unsigned long irqsave=0;
+        unsigned long irqsave = 0;
         uint32_t status = 0;

         DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
                 engine, from_irq, (unsigned long) blitq);

-        if (from_irq) {
+        if (from_irq)
                 spin_lock(&blitq->blit_lock);
-        } else {
+        else
                 spin_lock_irqsave(&blitq->blit_lock, irqsave);
-        }

         done_transfer = blitq->is_active &&
-                (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
-        done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
+                ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+        done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

         cur = blitq->cur;
         if (done_transfer) {
@@ -377,18 +376,16 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
                         if (!timer_pending(&blitq->poll_timer))
                                 mod_timer(&blitq->poll_timer, jiffies + 1);
                 } else {
-                        if (timer_pending(&blitq->poll_timer)) {
+                        if (timer_pending(&blitq->poll_timer))
                                 del_timer(&blitq->poll_timer);
-                        }
                         via_dmablit_engine_off(dev, engine);
                 }
         }

-        if (from_irq) {
+        if (from_irq)
                 spin_unlock(&blitq->blit_lock);
-        } else {
+        else
                 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-        }
 }


@@ -414,10 +411,9 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
                 ((blitq->cur_blit_handle - handle) <= (1 << 23));

         if (queue && active) {
-                slot = handle - blitq->done_blit_handle + blitq->cur -1;
-                if (slot >= VIA_NUM_BLIT_SLOTS) {
+                slot = handle - blitq->done_blit_handle + blitq->cur - 1;
+                if (slot >= VIA_NUM_BLIT_SLOTS)
                         slot -= VIA_NUM_BLIT_SLOTS;
-                }
                 *queue = blitq->blit_queue + slot;
         }

@@ -506,12 +502,12 @@ via_dmablit_workqueue(struct work_struct *work)
         int cur_released;


-        DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
+        DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
                 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

         spin_lock_irqsave(&blitq->blit_lock, irqsave);

-        while(blitq->serviced != blitq->cur) {
+        while (blitq->serviced != blitq->cur) {

                 cur_released = blitq->serviced++;

@@ -545,13 +541,13 @@ via_dmablit_workqueue(struct work_struct *work)
 void
 via_init_dmablit(struct drm_device *dev)
 {
-        int i,j;
+        int i, j;
         drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
         drm_via_blitq_t *blitq;

         pci_set_master(dev->pdev);

-        for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
+        for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
                 blitq = dev_priv->blit_queues + i;
                 blitq->dev = dev;
                 blitq->cur_blit_handle = 0;
@@ -564,9 +560,8 @@ via_init_dmablit(struct drm_device *dev)
                 blitq->is_active = 0;
                 blitq->aborting = 0;
                 spin_lock_init(&blitq->blit_lock);
-                for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
+                for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
                         DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
-                }
                 DRM_INIT_WAITQUEUE(&blitq->busy_queue);
                 INIT_WORK(&blitq->wq, via_dmablit_workqueue);
                 setup_timer(&blitq->poll_timer, via_dmablit_timer,
@@ -685,18 +680,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
 static int
 via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
 {
-        int ret=0;
+        int ret = 0;
         unsigned long irqsave;

         DRM_DEBUG("Num free is %d\n", blitq->num_free);
         spin_lock_irqsave(&blitq->blit_lock, irqsave);
-        while(blitq->num_free == 0) {
+        while (blitq->num_free == 0) {
                 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

                 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
-                if (ret) {
+                if (ret)
                         return (-EINTR == ret) ? -EAGAIN : ret;
-                }

                 spin_lock_irqsave(&blitq->blit_lock, irqsave);
         }
@@ -719,7 +713,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
         spin_lock_irqsave(&blitq->blit_lock, irqsave);
         blitq->num_free++;
         spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-        DRM_WAKEUP( &blitq->busy_queue );
+        DRM_WAKEUP(&blitq->busy_queue);
 }

 /*
@@ -744,9 +738,8 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)

         engine = (xfer->to_fb) ? 0 : 1;
         blitq = dev_priv->blit_queues + engine;
-        if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
+        if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
                 return ret;
-        }
         if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
                 via_dmablit_release_slot(blitq);
                 return -ENOMEM;
@@ -780,7 +773,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
  */

 int
-via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
+via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
         drm_via_blitsync_t *sync = data;
         int err;
@@ -804,7 +797,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
  */

 int
-via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
+via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
         drm_via_dmablit_t *xfer = data;
         int err;