path: root/drivers/char/drm
author	Dave Airlie <airlied@linux.ie>	2006-08-19 03:40:50 -0400
committer	Dave Airlie <airlied@linux.ie>	2006-09-21 15:32:33 -0400
commit	d40c8533a5b8ca1887f81ae1c81136f3c40a8488 (patch)
tree	53790b7313dbab36de615cf369949438cfb06010 /drivers/char/drm
parent	1f4eccfdb2a5f8b2751aea8cf2d6b00401c156e0 (diff)
drm: realign via driver with drm git tree
This just realigns some code/whitespace between the kernel and main tree.

Signed-off-by: Dave Airlie <airlied@linux.ie>
Diffstat (limited to 'drivers/char/drm')
-rw-r--r--	drivers/char/drm/via_dmablit.c	68
-rw-r--r--	drivers/char/drm/via_drm.h	8
-rw-r--r--	drivers/char/drm/via_mm.c	4
3 files changed, 54 insertions(+), 26 deletions(-)
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 78a81a4a99c5..60c1695db300 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -41,9 +41,9 @@
 
 #include <linux/pagemap.h>
 
 #define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
 #define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
 #define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)
 
 typedef struct _drm_via_descriptor {
 	uint32_t mem_addr;
@@ -121,19 +121,19 @@ via_map_blit_for_device(struct pci_dev *pdev,
 
 	while (line_len > 0) {
 
 		remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
 		line_len -= remaining_len;
 
 		if (mode == 1) {
 			desc_ptr->mem_addr =
 				dma_map_page(&pdev->dev,
 					     vsg->pages[VIA_PFN(cur_mem) -
 							VIA_PFN(first_addr)],
 					     VIA_PGOFF(cur_mem), remaining_len,
 					     vsg->direction);
 			desc_ptr->dev_addr = cur_fb;
 
 			desc_ptr->size = remaining_len;
 			desc_ptr->next = (uint32_t) next;
 			next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
 					      DMA_TO_DEVICE);
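The loop shown above builds the engine's scatter/gather chain one page fragment at a time: each descriptor covers at most the rest of the current user page, records the matching framebuffer address, and is itself DMA-mapped so the hardware can follow the chain. A simplified sketch of that chaining idea follows; the struct layout mirrors the driver's descriptor, but the helper name and parameters are illustrative, not taken from this patch.

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/types.h>

/* Hypothetical mini-descriptor, modelled on the driver's descriptor struct. */
struct sg_desc {
	uint32_t mem_addr;	/* bus address of the user-page fragment */
	uint32_t dev_addr;	/* device (framebuffer) address */
	uint32_t size;		/* fragment length, never crosses a page */
	uint32_t next;		/* bus address of the next descriptor, 0 = end */
};

/* Illustrative helper: map one fragment and link it into the chain.
 * 'link' is the bus address this descriptor should point at; the return
 * value is this descriptor's own bus address for the next link. */
static uint32_t chain_fragment(struct pci_dev *pdev, struct sg_desc *d,
			       struct page *pg, unsigned long off,
			       unsigned long len, uint32_t fb_addr,
			       uint32_t link, enum dma_data_direction dir)
{
	d->mem_addr = (uint32_t) dma_map_page(&pdev->dev, pg, off, len, dir);
	d->dev_addr = fb_addr;
	d->size = len;
	d->next = link;
	/* The descriptor itself must be visible to the device too. */
	return (uint32_t) dma_map_single(&pdev->dev, d, sizeof(*d),
					 DMA_TO_DEVICE);
}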
@@ -162,7 +162,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
 
 /*
  * Function that frees up all resources for a blit. It is usable even if the
- * blit info has only be partially built as long as the status enum is consistent
+ * blit info has only been partially built as long as the status enum is consistent
  * with the actual status of the used resources.
  */
 
@@ -238,8 +238,11 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 		return DRM_ERR(ENOMEM);
 	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
 	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
-			     vsg->num_pages, vsg->direction, 0, vsg->pages, NULL);
+	ret = get_user_pages(current, current->mm,
+			     (unsigned long)xfer->mem_addr,
+			     vsg->num_pages,
+			     (vsg->direction == DMA_FROM_DEVICE),
+			     0, vsg->pages, NULL);
 
 	up_read(&current->mm->mmap_sem);
 	if (ret != vsg->num_pages) {
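Besides the re-wrapping, the call now passes a genuine boolean as the "write" argument of the 2.6-era get_user_pages() (tsk, mm, start, nr, write, force, pages, vmas): a blit that transfers from the device (DMA_FROM_DEVICE) writes into the user pages, so they must be pinned writable, where the old code handed over the raw dma_data_direction value. A minimal sketch of that translation, with a hypothetical helper name:

#include <linux/dma-mapping.h>

/* Sketch: map the DMA direction onto get_user_pages()'s "write" flag.
 * DMA_FROM_DEVICE means the device writes into system memory, so the
 * pinned pages must be writable from the CPU side as well. */
static inline int blit_pages_writable(enum dma_data_direction dir)
{
	return dir == DMA_FROM_DEVICE;
}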
@@ -475,9 +478,15 @@ via_dmablit_timer(unsigned long data)
 	if (!timer_pending(&blitq->poll_timer)) {
 		blitq->poll_timer.expires = jiffies+1;
 		add_timer(&blitq->poll_timer);
-	}
-	via_dmablit_handler(dev, engine, 0);
 
+		/*
+		 * Rerun handler to delete timer if engines are off, and
+		 * to shorten abort latency. This is a little nasty.
+		 */
+
+		via_dmablit_handler(dev, engine, 0);
+
+	}
 }
 
 
@@ -597,15 +606,27 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
 	 * (Not a big limitation anyway.)
 	 */
 
-	if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) ||
-	    (xfer->mem_stride > 2048*4)) {
+	if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
 		DRM_ERROR("Too large system memory stride. Stride: %d, "
 			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
 		return DRM_ERR(EINVAL);
 	}
 
-	if (xfer->num_lines > 2048) {
-		DRM_ERROR("Too many PCI DMA bitblt lines.\n");
+	if ((xfer->mem_stride == xfer->line_length) &&
+	    (xfer->fb_stride == xfer->line_length)) {
+		xfer->mem_stride *= xfer->num_lines;
+		xfer->line_length = xfer->mem_stride;
+		xfer->fb_stride = xfer->mem_stride;
+		xfer->num_lines = 1;
+	}
+
+	/*
+	 * Don't lock an arbitrary large number of pages, since that causes a
+	 * DOS security hole.
+	 */
+
+	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
+		DRM_ERROR("Too large PCI DMA bitblt.\n");
 		return DRM_ERR(EINVAL);
 	}
 
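The added cap bounds how much user memory one blit may pin: at most 2048 lines and at most 2048*2048*4 bytes, i.e. 16 MiB, which with 4 KiB pages is roughly 4096 pinned pages. A small restatement of that bound with made-up names (the constants mirror the test above):

#include <linux/types.h>

#define VIA_MAX_BLIT_LINES	2048
#define VIA_MAX_BLIT_BYTES	(2048 * 2048 * 4)	/* 16 MiB */

/* Returns nonzero if a request would pin an unreasonable amount of memory.
 * With 4 KiB pages, 16 MiB corresponds to at most 4096 pinned pages. */
static int blit_too_large(uint32_t num_lines, uint32_t mem_stride)
{
	return num_lines > VIA_MAX_BLIT_LINES ||
	       (unsigned long)num_lines * mem_stride > VIA_MAX_BLIT_BYTES;
}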
@@ -628,16 +649,17 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
 
 #ifdef VIA_BUGFREE
 	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
-	    ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) {
+	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
 		DRM_ERROR("Invalid DRM bitblt alignment.\n");
 		return DRM_ERR(EINVAL);
 	}
 #else
 	if ((((unsigned long)xfer->mem_addr & 15) ||
-	     ((unsigned long)xfer->fb_addr & 3)) || (xfer->mem_stride & 15) ||
-	    (xfer->fb_stride & 3)) {
+	     ((unsigned long)xfer->fb_addr & 3)) ||
+	    ((xfer->num_lines > 1) &&
+	     ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
 		DRM_ERROR("Invalid DRM bitblt alignment.\n");
 		return DRM_ERR(EINVAL);
 	}
 #endif
 
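Both branches apply the same relaxation: stride alignment only matters when more than one line is transferred, since a single-line blit never advances by the stride. A hedged restatement of the default (non-VIA_BUGFREE) rule, with an illustrative helper name:

#include <linux/types.h>

/* Sketch of the default alignment rule after this change: the system
 * memory address must be 16-byte aligned, the framebuffer address
 * 4-byte aligned, and the strides only need to be aligned when the
 * blit actually uses them (num_lines > 1). */
static int blit_alignment_ok(unsigned long mem_addr, unsigned long fb_addr,
			     uint32_t mem_stride, uint32_t fb_stride,
			     uint32_t num_lines)
{
	if ((mem_addr & 15) || (fb_addr & 3))
		return 0;
	if (num_lines > 1 && ((mem_stride & 15) || (fb_stride & 3)))
		return 0;
	return 1;
}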
@@ -715,7 +737,7 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
 	drm_via_sg_info_t *vsg;
 	drm_via_blitq_t *blitq;
 	int ret;
 	int engine;
 	unsigned long irqsave;
 
@@ -756,7 +778,7 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
 
 /*
  * Sync on a previously submitted blit. Note that the X server use signals extensively, and
- * that there is a very big proability that this IOCTL will be interrupted by a signal. In that
+ * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
  * case it returns with -EAGAIN for the signal to be delivered.
  * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
  */
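For user space this means the sync ioctl simply has to be reissued when a signal interrupts it. A hypothetical retry loop is sketched below; drm_via_blitsync_t comes from via_drm.h, while VIA_BLIT_SYNC_IOCTL stands in for whatever request code the caller actually uses and is not defined by this patch:

#include <errno.h>
#include <sys/ioctl.h>
#include "via_drm.h"	/* drm_via_blitsync_t */

/* Sketch: keep re-issuing the blit-sync ioctl while it is interrupted. */
static int wait_for_blit(int drm_fd, drm_via_blitsync_t *sync)
{
	int ret;

	do {
		ret = ioctl(drm_fd, VIA_BLIT_SYNC_IOCTL, sync);
	} while (ret == -1 && (errno == EAGAIN || errno == EINTR));

	return ret;
}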
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
index 47f0b5b26379..e4ee97d7156f 100644
--- a/drivers/char/drm/via_drm.h
+++ b/drivers/char/drm/via_drm.h
@@ -250,6 +250,12 @@ typedef struct drm_via_blitsync {
 	unsigned engine;
 } drm_via_blitsync_t;
 
+/* - * Below,"flags" is currently unused but will be used for possible future
+ * extensions like kernel space bounce buffers for bad alignments and
+ * blit engine busy-wait polling for better latency in the absence of
+ * interrupts.
+ */
+
 typedef struct drm_via_dmablit {
 	uint32_t num_lines;
 	uint32_t line_length;
@@ -260,7 +266,7 @@ typedef struct drm_via_dmablit {
 	unsigned char *mem_addr;
 	uint32_t mem_stride;
 
-	int bounce_buffer;
+	uint32_t flags;
 	int to_fb;
 
 	drm_via_blitsync_t sync;
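With bounce_buffer gone, callers are expected to clear the new flags word, which the comment above reserves for future extensions. A hypothetical fill-in using only fields visible in this diff (fb_addr and fb_stride are assumed from their use in via_dmablit.c; the helper itself is illustrative):

#include <string.h>
#include "via_drm.h"	/* drm_via_dmablit_t */

/* Sketch: prepare a blit request. 'flags' is documented as currently
 * unused, so it is left zero; the other fields are the ones shown in
 * the structure above. */
static void setup_blit(drm_via_dmablit_t *xfer, unsigned char *mem,
		       uint32_t fb_offset, uint32_t width, uint32_t height,
		       uint32_t stride, int to_fb)
{
	memset(xfer, 0, sizeof(*xfer));
	xfer->num_lines = height;
	xfer->line_length = width;
	xfer->mem_addr = mem;
	xfer->mem_stride = stride;
	xfer->fb_addr = fb_offset;
	xfer->fb_stride = stride;
	xfer->flags = 0;	/* reserved for future use */
	xfer->to_fb = to_fb;
}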
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index d6c08e1d768f..2fcf0577a7aa 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -52,7 +52,7 @@ int via_agp_init(DRM_IOCTL_ARGS)
 		return ret;
 	}
 
-	dev_priv->agp_initialized = TRUE;
+	dev_priv->agp_initialized = 1;
 	dev_priv->agp_offset = agp.offset;
 	mutex_unlock(&dev->struct_mutex);
 
@@ -79,7 +79,7 @@ int via_fb_init(DRM_IOCTL_ARGS)
 		return ret;
 	}
 
-	dev_priv->vram_initialized = TRUE;
+	dev_priv->vram_initialized = 1;
 	dev_priv->vram_offset = fb.offset;
 
 	mutex_unlock(&dev->struct_mutex);