author	Arnd Bergmann <arnd@arndb.de>	2011-01-25 17:17:16 -0500
committer	Dave Airlie <airlied@redhat.com>	2011-02-06 21:14:18 -0500
commit	7f50684717511d30bba180902105c4cd4efca732 (patch)
tree	5383f403f9a1493afc021ce7d5bd5a85df9a17e0
parent	8dbdea8444d303a772bceb1ba963f0e3273bfc5e (diff)
drm: remove i830 driver
This driver is one of the last users of the big kernel lock, which is
going away. All the hardware supported by this driver also works with
the newer i915 driver, and recent X.org releases only work with that
driver anyway.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Dave Airlie <airlied@redhat.com>
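The BKL dependence being removed here is confined to the driver's ioctl path: i830 wraps every ioctl in lock_kernel()/unlock_kernel() so it can serialize against i830_mmap_buffers() (see i830_ioctl() in the deleted i830_dma.c below). For comparison, a minimal sketch of a BKL-free file_operations table of the same era, built only from the usual DRM core helpers and not taken from this patch, routes ioctls straight to drm_ioctl():

/* Sketch only: a DRM fops table that needs no BKL wrapper. */
static const struct file_operations example_drm_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,	/* no lock_kernel()/unlock_kernel() shim */
	.mmap		= drm_mmap,
	.poll		= drm_poll,
	.fasync		= drm_fasync,
	.llseek		= noop_llseek,
};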
-rw-r--r--  drivers/gpu/drm/Kconfig          |   43
-rw-r--r--  drivers/gpu/drm/Makefile         |    1
-rw-r--r--  drivers/gpu/drm/i830/Makefile    |    8
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c  | 1560
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.c  |  107
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.h  |  295
-rw-r--r--  drivers/gpu/drm/i830/i830_irq.c  |  186
-rw-r--r--  include/drm/Kbuild               |    1
-rw-r--r--  include/drm/i830_drm.h           |  342
9 files changed, 17 insertions, 2526 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0902d446003..44588764a52 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -80,25 +80,10 @@ config DRM_I810
 	  selected, the module will be called i810. AGP support is required
 	  for this driver to work.
 
-choice
-	prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
-	depends on DRM && AGP && AGP_INTEL
-	optional
-
-config DRM_I830
-	tristate "i830 driver"
-	# BKL usage in order to avoid AB-BA deadlocks, i830 may get removed
-	depends on BKL
-	help
-	  Choose this option if you have a system that has Intel 830M, 845G,
-	  852GM, 855GM or 865G integrated graphics. If M is selected, the
-	  module will be called i830. AGP support is required for this driver
-	  to work. This driver is used by the older X releases X.org 6.7 and
-	  XFree86 4.3. If unsure, build this and i915 as modules and the X server
-	  will load the correct one.
-
 config DRM_I915
-	tristate "i915 driver"
+	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+	depends on DRM
+	depends on AGP
 	depends on AGP_INTEL
 	# we need shmfs for the swappable backing store, and in particular
 	# the shmem_readpage() which depends upon tmpfs
@@ -115,12 +100,20 @@ config DRM_I915
 	select ACPI_VIDEO if ACPI
 	select ACPI_BUTTON if ACPI
 	help
-	  Choose this option if you have a system that has Intel 830M, 845G,
-	  852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
-	  module will be called i915. AGP support is required for this driver
-	  to work. This driver is used by the Intel driver in X.org 6.8 and
-	  XFree86 4.4 and above. If unsure, build this and i830 as modules and
-	  the X server will load the correct one.
+	  Choose this option if you have a system that has "Intel Graphics
+	  Media Accelerator" or "HD Graphics" integrated graphics,
+	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+	  If M is selected, the module will be called i915. AGP support
+	  is required for this driver to work. This driver is used by
+	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+	  replaces the older i830 module that supported a subset of the
+	  hardware in older X.org releases.
+
+	  Note that the older i810/i815 chipsets require the use of the
+	  i810 driver instead, and the Atom z5xx series has an entirely
+	  different implementation.
 
 config DRM_I915_KMS
 	bool "Enable modesetting on intel by default"
@@ -132,8 +125,6 @@ config DRM_I915_KMS
 	  the driver to bind to PCI devices, which precludes loading things
 	  like intelfb.
 
-endchoice
-
 config DRM_MGA
 	tristate "Matrox g200/g400"
 	depends on DRM && PCI
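With the choice group removed, the consolidated driver is enabled through DRM_I915 alone plus its AGP dependencies. As a rough sketch (option values are illustrative and not part of this patch), a .config fragment for hardware previously covered by DRM_I830 would now look like:

# Illustrative .config fragment after this change (assumed typical values)
CONFIG_AGP=y
CONFIG_AGP_INTEL=y
CONFIG_DRM=m
# CONFIG_DRM_I830 no longer exists; the i915 module covers that hardware
CONFIG_DRM_I915=m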
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 997c43d0490..d9cb3e31432 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_DRM_R128) += r128/
 obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_MGA)   += mga/
 obj-$(CONFIG_DRM_I810)  += i810/
-obj-$(CONFIG_DRM_I830)  += i830/
 obj-$(CONFIG_DRM_I915)  += i915/
 obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/i830/Makefile
deleted file mode 100644
index c642ee0b238..00000000000
--- a/drivers/gpu/drm/i830/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6i830-y := i830_drv.o i830_dma.o i830_irq.o
7
8obj-$(CONFIG_DRM_I830) += i830.o
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
deleted file mode 100644
index ca6f31ff0ee..00000000000
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ /dev/null
@@ -1,1560 +0,0 @@
1/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 * Abraham vd Merwe <abraham@2d3d.co.za>
31 *
32 */
33
34#include "drmP.h"
35#include "drm.h"
36#include "i830_drm.h"
37#include "i830_drv.h"
38#include <linux/interrupt.h> /* For task queue support */
39#include <linux/smp_lock.h>
40#include <linux/pagemap.h>
41#include <linux/delay.h>
42#include <linux/slab.h>
43#include <asm/uaccess.h>
44
45#define I830_BUF_FREE 2
46#define I830_BUF_CLIENT 1
47#define I830_BUF_HARDWARE 0
48
49#define I830_BUF_UNMAPPED 0
50#define I830_BUF_MAPPED 1
51
52static struct drm_buf *i830_freelist_get(struct drm_device * dev)
53{
54 struct drm_device_dma *dma = dev->dma;
55 int i;
56 int used;
57
58 /* Linear search might not be the best solution */
59
60 for (i = 0; i < dma->buf_count; i++) {
61 struct drm_buf *buf = dma->buflist[i];
62 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
63 /* In use is already a pointer */
64 used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
65 I830_BUF_CLIENT);
66 if (used == I830_BUF_FREE)
67 return buf;
68 }
69 return NULL;
70}
71
72/* This should only be called if the buffer is not sent to the hardware
73 * yet, the hardware updates in use for us once its on the ring buffer.
74 */
75
76static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf)
77{
78 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
79 int used;
80
81 /* In use is already a pointer */
82 used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
83 if (used != I830_BUF_CLIENT) {
84 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
85 return -EINVAL;
86 }
87
88 return 0;
89}
90
91static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
92{
93 struct drm_file *priv = filp->private_data;
94 struct drm_device *dev;
95 drm_i830_private_t *dev_priv;
96 struct drm_buf *buf;
97 drm_i830_buf_priv_t *buf_priv;
98
99 lock_kernel();
100 dev = priv->minor->dev;
101 dev_priv = dev->dev_private;
102 buf = dev_priv->mmap_buffer;
103 buf_priv = buf->dev_private;
104
105 vma->vm_flags |= (VM_IO | VM_DONTCOPY);
106 vma->vm_file = filp;
107
108 buf_priv->currently_mapped = I830_BUF_MAPPED;
109 unlock_kernel();
110
111 if (io_remap_pfn_range(vma, vma->vm_start,
112 vma->vm_pgoff,
113 vma->vm_end - vma->vm_start, vma->vm_page_prot))
114 return -EAGAIN;
115 return 0;
116}
117
118static const struct file_operations i830_buffer_fops = {
119 .open = drm_open,
120 .release = drm_release,
121 .unlocked_ioctl = i830_ioctl,
122 .mmap = i830_mmap_buffers,
123 .fasync = drm_fasync,
124 .llseek = noop_llseek,
125};
126
127static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
128{
129 struct drm_device *dev = file_priv->minor->dev;
130 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
131 drm_i830_private_t *dev_priv = dev->dev_private;
132 const struct file_operations *old_fops;
133 unsigned long virtual;
134 int retcode = 0;
135
136 if (buf_priv->currently_mapped == I830_BUF_MAPPED)
137 return -EINVAL;
138
139 down_write(&current->mm->mmap_sem);
140 old_fops = file_priv->filp->f_op;
141 file_priv->filp->f_op = &i830_buffer_fops;
142 dev_priv->mmap_buffer = buf;
143 virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
144 MAP_SHARED, buf->bus_address);
145 dev_priv->mmap_buffer = NULL;
146 file_priv->filp->f_op = old_fops;
147 if (IS_ERR((void *)virtual)) { /* ugh */
148 /* Real error */
149 DRM_ERROR("mmap error\n");
150 retcode = PTR_ERR((void *)virtual);
151 buf_priv->virtual = NULL;
152 } else {
153 buf_priv->virtual = (void __user *)virtual;
154 }
155 up_write(&current->mm->mmap_sem);
156
157 return retcode;
158}
159
160static int i830_unmap_buffer(struct drm_buf *buf)
161{
162 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
163 int retcode = 0;
164
165 if (buf_priv->currently_mapped != I830_BUF_MAPPED)
166 return -EINVAL;
167
168 down_write(&current->mm->mmap_sem);
169 retcode = do_munmap(current->mm,
170 (unsigned long)buf_priv->virtual,
171 (size_t) buf->total);
172 up_write(&current->mm->mmap_sem);
173
174 buf_priv->currently_mapped = I830_BUF_UNMAPPED;
175 buf_priv->virtual = NULL;
176
177 return retcode;
178}
179
180static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d,
181 struct drm_file *file_priv)
182{
183 struct drm_buf *buf;
184 drm_i830_buf_priv_t *buf_priv;
185 int retcode = 0;
186
187 buf = i830_freelist_get(dev);
188 if (!buf) {
189 retcode = -ENOMEM;
190 DRM_DEBUG("retcode=%d\n", retcode);
191 return retcode;
192 }
193
194 retcode = i830_map_buffer(buf, file_priv);
195 if (retcode) {
196 i830_freelist_put(dev, buf);
197 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
198 return retcode;
199 }
200 buf->file_priv = file_priv;
201 buf_priv = buf->dev_private;
202 d->granted = 1;
203 d->request_idx = buf->idx;
204 d->request_size = buf->total;
205 d->virtual = buf_priv->virtual;
206
207 return retcode;
208}
209
210static int i830_dma_cleanup(struct drm_device *dev)
211{
212 struct drm_device_dma *dma = dev->dma;
213
214 /* Make sure interrupts are disabled here because the uninstall ioctl
215 * may not have been called from userspace and after dev_private
216 * is freed, it's too late.
217 */
218 if (dev->irq_enabled)
219 drm_irq_uninstall(dev);
220
221 if (dev->dev_private) {
222 int i;
223 drm_i830_private_t *dev_priv =
224 (drm_i830_private_t *) dev->dev_private;
225
226 if (dev_priv->ring.virtual_start)
227 drm_core_ioremapfree(&dev_priv->ring.map, dev);
228 if (dev_priv->hw_status_page) {
229 pci_free_consistent(dev->pdev, PAGE_SIZE,
230 dev_priv->hw_status_page,
231 dev_priv->dma_status_page);
232 /* Need to rewrite hardware status page */
233 I830_WRITE(0x02080, 0x1ffff000);
234 }
235
236 kfree(dev->dev_private);
237 dev->dev_private = NULL;
238
239 for (i = 0; i < dma->buf_count; i++) {
240 struct drm_buf *buf = dma->buflist[i];
241 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
242 if (buf_priv->kernel_virtual && buf->total)
243 drm_core_ioremapfree(&buf_priv->map, dev);
244 }
245 }
246 return 0;
247}
248
249int i830_wait_ring(struct drm_device *dev, int n, const char *caller)
250{
251 drm_i830_private_t *dev_priv = dev->dev_private;
252 drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
253 int iters = 0;
254 unsigned long end;
255 unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
256
257 end = jiffies + (HZ * 3);
258 while (ring->space < n) {
259 ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
260 ring->space = ring->head - (ring->tail + 8);
261 if (ring->space < 0)
262 ring->space += ring->Size;
263
264 if (ring->head != last_head) {
265 end = jiffies + (HZ * 3);
266 last_head = ring->head;
267 }
268
269 iters++;
270 if (time_before(end, jiffies)) {
271 DRM_ERROR("space: %d wanted %d\n", ring->space, n);
272 DRM_ERROR("lockup\n");
273 goto out_wait_ring;
274 }
275 udelay(1);
276 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
277 }
278
279out_wait_ring:
280 return iters;
281}
282
283static void i830_kernel_lost_context(struct drm_device *dev)
284{
285 drm_i830_private_t *dev_priv = dev->dev_private;
286 drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
287
288 ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
289 ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
290 ring->space = ring->head - (ring->tail + 8);
291 if (ring->space < 0)
292 ring->space += ring->Size;
293
294 if (ring->head == ring->tail)
295 dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
296}
297
298static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv)
299{
300 struct drm_device_dma *dma = dev->dma;
301 int my_idx = 36;
302 u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
303 int i;
304
305 if (dma->buf_count > 1019) {
306 /* Not enough space in the status page for the freelist */
307 return -EINVAL;
308 }
309
310 for (i = 0; i < dma->buf_count; i++) {
311 struct drm_buf *buf = dma->buflist[i];
312 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
313
314 buf_priv->in_use = hw_status++;
315 buf_priv->my_use_idx = my_idx;
316 my_idx += 4;
317
318 *buf_priv->in_use = I830_BUF_FREE;
319
320 buf_priv->map.offset = buf->bus_address;
321 buf_priv->map.size = buf->total;
322 buf_priv->map.type = _DRM_AGP;
323 buf_priv->map.flags = 0;
324 buf_priv->map.mtrr = 0;
325
326 drm_core_ioremap(&buf_priv->map, dev);
327 buf_priv->kernel_virtual = buf_priv->map.handle;
328 }
329 return 0;
330}
331
332static int i830_dma_initialize(struct drm_device *dev,
333 drm_i830_private_t *dev_priv,
334 drm_i830_init_t *init)
335{
336 struct drm_map_list *r_list;
337
338 memset(dev_priv, 0, sizeof(drm_i830_private_t));
339
340 list_for_each_entry(r_list, &dev->maplist, head) {
341 if (r_list->map &&
342 r_list->map->type == _DRM_SHM &&
343 r_list->map->flags & _DRM_CONTAINS_LOCK) {
344 dev_priv->sarea_map = r_list->map;
345 break;
346 }
347 }
348
349 if (!dev_priv->sarea_map) {
350 dev->dev_private = (void *)dev_priv;
351 i830_dma_cleanup(dev);
352 DRM_ERROR("can not find sarea!\n");
353 return -EINVAL;
354 }
355 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
356 if (!dev_priv->mmio_map) {
357 dev->dev_private = (void *)dev_priv;
358 i830_dma_cleanup(dev);
359 DRM_ERROR("can not find mmio map!\n");
360 return -EINVAL;
361 }
362 dev->agp_buffer_token = init->buffers_offset;
363 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
364 if (!dev->agp_buffer_map) {
365 dev->dev_private = (void *)dev_priv;
366 i830_dma_cleanup(dev);
367 DRM_ERROR("can not find dma buffer map!\n");
368 return -EINVAL;
369 }
370
371 dev_priv->sarea_priv = (drm_i830_sarea_t *)
372 ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
373
374 dev_priv->ring.Start = init->ring_start;
375 dev_priv->ring.End = init->ring_end;
376 dev_priv->ring.Size = init->ring_size;
377
378 dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
379 dev_priv->ring.map.size = init->ring_size;
380 dev_priv->ring.map.type = _DRM_AGP;
381 dev_priv->ring.map.flags = 0;
382 dev_priv->ring.map.mtrr = 0;
383
384 drm_core_ioremap(&dev_priv->ring.map, dev);
385
386 if (dev_priv->ring.map.handle == NULL) {
387 dev->dev_private = (void *)dev_priv;
388 i830_dma_cleanup(dev);
389 DRM_ERROR("can not ioremap virtual address for"
390 " ring buffer\n");
391 return -ENOMEM;
392 }
393
394 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
395
396 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
397
398 dev_priv->w = init->w;
399 dev_priv->h = init->h;
400 dev_priv->pitch = init->pitch;
401 dev_priv->back_offset = init->back_offset;
402 dev_priv->depth_offset = init->depth_offset;
403 dev_priv->front_offset = init->front_offset;
404
405 dev_priv->front_di1 = init->front_offset | init->pitch_bits;
406 dev_priv->back_di1 = init->back_offset | init->pitch_bits;
407 dev_priv->zi1 = init->depth_offset | init->pitch_bits;
408
409 DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
410 DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
411 DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
412 DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);
413
414 dev_priv->cpp = init->cpp;
415 /* We are using separate values as placeholders for mechanisms for
416 * private backbuffer/depthbuffer usage.
417 */
418
419 dev_priv->back_pitch = init->back_pitch;
420 dev_priv->depth_pitch = init->depth_pitch;
421 dev_priv->do_boxes = 0;
422 dev_priv->use_mi_batchbuffer_start = 0;
423
424 /* Program Hardware Status Page */
425 dev_priv->hw_status_page =
426 pci_alloc_consistent(dev->pdev, PAGE_SIZE,
427 &dev_priv->dma_status_page);
428 if (!dev_priv->hw_status_page) {
429 dev->dev_private = (void *)dev_priv;
430 i830_dma_cleanup(dev);
431 DRM_ERROR("Can not allocate hardware status page\n");
432 return -ENOMEM;
433 }
434 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
435 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
436
437 I830_WRITE(0x02080, dev_priv->dma_status_page);
438 DRM_DEBUG("Enabled hardware status page\n");
439
440 /* Now we need to init our freelist */
441 if (i830_freelist_init(dev, dev_priv) != 0) {
442 dev->dev_private = (void *)dev_priv;
443 i830_dma_cleanup(dev);
444 DRM_ERROR("Not enough space in the status page for"
445 " the freelist\n");
446 return -ENOMEM;
447 }
448 dev->dev_private = (void *)dev_priv;
449
450 return 0;
451}
452
453static int i830_dma_init(struct drm_device *dev, void *data,
454 struct drm_file *file_priv)
455{
456 drm_i830_private_t *dev_priv;
457 drm_i830_init_t *init = data;
458 int retcode = 0;
459
460 switch (init->func) {
461 case I830_INIT_DMA:
462 dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL);
463 if (dev_priv == NULL)
464 return -ENOMEM;
465 retcode = i830_dma_initialize(dev, dev_priv, init);
466 break;
467 case I830_CLEANUP_DMA:
468 retcode = i830_dma_cleanup(dev);
469 break;
470 default:
471 retcode = -EINVAL;
472 break;
473 }
474
475 return retcode;
476}
477
478#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
479#define ST1_ENABLE (1<<16)
480#define ST1_MASK (0xffff)
481
482/* Most efficient way to verify state for the i830 is as it is
483 * emitted. Non-conformant state is silently dropped.
484 */
485static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code)
486{
487 drm_i830_private_t *dev_priv = dev->dev_private;
488 int i, j = 0;
489 unsigned int tmp;
490 RING_LOCALS;
491
492 BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4);
493
494 for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) {
495 tmp = code[i];
496 if ((tmp & (7 << 29)) == CMD_3D &&
497 (tmp & (0x1f << 24)) < (0x1d << 24)) {
498 OUT_RING(tmp);
499 j++;
500 } else {
501 DRM_ERROR("Skipping %d\n", i);
502 }
503 }
504
505 OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD);
506 OUT_RING(code[I830_CTXREG_BLENDCOLR]);
507 j += 2;
508
509 for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) {
510 tmp = code[i];
511 if ((tmp & (7 << 29)) == CMD_3D &&
512 (tmp & (0x1f << 24)) < (0x1d << 24)) {
513 OUT_RING(tmp);
514 j++;
515 } else {
516 DRM_ERROR("Skipping %d\n", i);
517 }
518 }
519
520 OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD);
521 OUT_RING(code[I830_CTXREG_MCSB1]);
522 j += 2;
523
524 if (j & 1)
525 OUT_RING(0);
526
527 ADVANCE_LP_RING();
528}
529
530static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code)
531{
532 drm_i830_private_t *dev_priv = dev->dev_private;
533 int i, j = 0;
534 unsigned int tmp;
535 RING_LOCALS;
536
537 if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
538 (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) ==
539 (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) {
540
541 BEGIN_LP_RING(I830_TEX_SETUP_SIZE);
542
543 OUT_RING(code[I830_TEXREG_MI0]); /* TM0LI */
544 OUT_RING(code[I830_TEXREG_MI1]); /* TM0S0 */
545 OUT_RING(code[I830_TEXREG_MI2]); /* TM0S1 */
546 OUT_RING(code[I830_TEXREG_MI3]); /* TM0S2 */
547 OUT_RING(code[I830_TEXREG_MI4]); /* TM0S3 */
548 OUT_RING(code[I830_TEXREG_MI5]); /* TM0S4 */
549
550 for (i = 6; i < I830_TEX_SETUP_SIZE; i++) {
551 tmp = code[i];
552 OUT_RING(tmp);
553 j++;
554 }
555
556 if (j & 1)
557 OUT_RING(0);
558
559 ADVANCE_LP_RING();
560 } else
561 printk("rejected packet %x\n", code[0]);
562}
563
564static void i830EmitTexBlendVerified(struct drm_device *dev,
565 unsigned int *code, unsigned int num)
566{
567 drm_i830_private_t *dev_priv = dev->dev_private;
568 int i, j = 0;
569 unsigned int tmp;
570 RING_LOCALS;
571
572 if (!num)
573 return;
574
575 BEGIN_LP_RING(num + 1);
576
577 for (i = 0; i < num; i++) {
578 tmp = code[i];
579 OUT_RING(tmp);
580 j++;
581 }
582
583 if (j & 1)
584 OUT_RING(0);
585
586 ADVANCE_LP_RING();
587}
588
589static void i830EmitTexPalette(struct drm_device *dev,
590 unsigned int *palette, int number, int is_shared)
591{
592 drm_i830_private_t *dev_priv = dev->dev_private;
593 int i;
594 RING_LOCALS;
595
596 return;
597
598 BEGIN_LP_RING(258);
599
600 if (is_shared == 1) {
601 OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
602 MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH);
603 } else {
604 OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
605 }
606 for (i = 0; i < 256; i++)
607 OUT_RING(palette[i]);
608 OUT_RING(0);
609 /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
610 */
611}
612
613/* Need to do some additional checking when setting the dest buffer.
614 */
615static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code)
616{
617 drm_i830_private_t *dev_priv = dev->dev_private;
618 unsigned int tmp;
619 RING_LOCALS;
620
621 BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10);
622
623 tmp = code[I830_DESTREG_CBUFADDR];
624 if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
625 if (((int)outring) & 8) {
626 OUT_RING(0);
627 OUT_RING(0);
628 }
629
630 OUT_RING(CMD_OP_DESTBUFFER_INFO);
631 OUT_RING(BUF_3D_ID_COLOR_BACK |
632 BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
633 BUF_3D_USE_FENCE);
634 OUT_RING(tmp);
635 OUT_RING(0);
636
637 OUT_RING(CMD_OP_DESTBUFFER_INFO);
638 OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
639 BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
640 OUT_RING(dev_priv->zi1);
641 OUT_RING(0);
642 } else {
643 DRM_ERROR("bad di1 %x (allow %x or %x)\n",
644 tmp, dev_priv->front_di1, dev_priv->back_di1);
645 }
646
647 /* invarient:
648 */
649
650 OUT_RING(GFX_OP_DESTBUFFER_VARS);
651 OUT_RING(code[I830_DESTREG_DV1]);
652
653 OUT_RING(GFX_OP_DRAWRECT_INFO);
654 OUT_RING(code[I830_DESTREG_DR1]);
655 OUT_RING(code[I830_DESTREG_DR2]);
656 OUT_RING(code[I830_DESTREG_DR3]);
657 OUT_RING(code[I830_DESTREG_DR4]);
658
659 /* Need to verify this */
660 tmp = code[I830_DESTREG_SENABLE];
661 if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
662 OUT_RING(tmp);
663 } else {
664 DRM_ERROR("bad scissor enable\n");
665 OUT_RING(0);
666 }
667
668 OUT_RING(GFX_OP_SCISSOR_RECT);
669 OUT_RING(code[I830_DESTREG_SR1]);
670 OUT_RING(code[I830_DESTREG_SR2]);
671 OUT_RING(0);
672
673 ADVANCE_LP_RING();
674}
675
676static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code)
677{
678 drm_i830_private_t *dev_priv = dev->dev_private;
679 RING_LOCALS;
680
681 BEGIN_LP_RING(2);
682 OUT_RING(GFX_OP_STIPPLE);
683 OUT_RING(code[1]);
684 ADVANCE_LP_RING();
685}
686
687static void i830EmitState(struct drm_device *dev)
688{
689 drm_i830_private_t *dev_priv = dev->dev_private;
690 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
691 unsigned int dirty = sarea_priv->dirty;
692
693 DRM_DEBUG("%s %x\n", __func__, dirty);
694
695 if (dirty & I830_UPLOAD_BUFFERS) {
696 i830EmitDestVerified(dev, sarea_priv->BufferState);
697 sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
698 }
699
700 if (dirty & I830_UPLOAD_CTX) {
701 i830EmitContextVerified(dev, sarea_priv->ContextState);
702 sarea_priv->dirty &= ~I830_UPLOAD_CTX;
703 }
704
705 if (dirty & I830_UPLOAD_TEX0) {
706 i830EmitTexVerified(dev, sarea_priv->TexState[0]);
707 sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
708 }
709
710 if (dirty & I830_UPLOAD_TEX1) {
711 i830EmitTexVerified(dev, sarea_priv->TexState[1]);
712 sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
713 }
714
715 if (dirty & I830_UPLOAD_TEXBLEND0) {
716 i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0],
717 sarea_priv->TexBlendStateWordsUsed[0]);
718 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
719 }
720
721 if (dirty & I830_UPLOAD_TEXBLEND1) {
722 i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1],
723 sarea_priv->TexBlendStateWordsUsed[1]);
724 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
725 }
726
727 if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
728 i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
729 } else {
730 if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
731 i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
732 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
733 }
734 if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
735 i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
736 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
737 }
738
739 /* 1.3:
740 */
741#if 0
742 if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
743 i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
744 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
745 }
746 if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
747 i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
748 sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
749 }
750#endif
751 }
752
753 /* 1.3:
754 */
755 if (dirty & I830_UPLOAD_STIPPLE) {
756 i830EmitStippleVerified(dev, sarea_priv->StippleState);
757 sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
758 }
759
760 if (dirty & I830_UPLOAD_TEX2) {
761 i830EmitTexVerified(dev, sarea_priv->TexState2);
762 sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
763 }
764
765 if (dirty & I830_UPLOAD_TEX3) {
766 i830EmitTexVerified(dev, sarea_priv->TexState3);
767 sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
768 }
769
770 if (dirty & I830_UPLOAD_TEXBLEND2) {
771 i830EmitTexBlendVerified(dev,
772 sarea_priv->TexBlendState2,
773 sarea_priv->TexBlendStateWordsUsed2);
774
775 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
776 }
777
778 if (dirty & I830_UPLOAD_TEXBLEND3) {
779 i830EmitTexBlendVerified(dev,
780 sarea_priv->TexBlendState3,
781 sarea_priv->TexBlendStateWordsUsed3);
782 sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
783 }
784}
785
786/* ================================================================
787 * Performance monitoring functions
788 */
789
790static void i830_fill_box(struct drm_device *dev,
791 int x, int y, int w, int h, int r, int g, int b)
792{
793 drm_i830_private_t *dev_priv = dev->dev_private;
794 u32 color;
795 unsigned int BR13, CMD;
796 RING_LOCALS;
797
798 BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24);
799 CMD = XY_COLOR_BLT_CMD;
800 x += dev_priv->sarea_priv->boxes[0].x1;
801 y += dev_priv->sarea_priv->boxes[0].y1;
802
803 if (dev_priv->cpp == 4) {
804 BR13 |= (1 << 25);
805 CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
806 color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
807 } else {
808 color = (((r & 0xf8) << 8) |
809 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
810 }
811
812 BEGIN_LP_RING(6);
813 OUT_RING(CMD);
814 OUT_RING(BR13);
815 OUT_RING((y << 16) | x);
816 OUT_RING(((y + h) << 16) | (x + w));
817
818 if (dev_priv->current_page == 1)
819 OUT_RING(dev_priv->front_offset);
820 else
821 OUT_RING(dev_priv->back_offset);
822
823 OUT_RING(color);
824 ADVANCE_LP_RING();
825}
826
827static void i830_cp_performance_boxes(struct drm_device *dev)
828{
829 drm_i830_private_t *dev_priv = dev->dev_private;
830
831 /* Purple box for page flipping
832 */
833 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP)
834 i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255);
835
836 /* Red box if we have to wait for idle at any point
837 */
838 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT)
839 i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0);
840
841 /* Blue box: lost context?
842 */
843 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT)
844 i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255);
845
846 /* Yellow box for texture swaps
847 */
848 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD)
849 i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0);
850
851 /* Green box if hardware never idles (as far as we can tell)
852 */
853 if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY))
854 i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0);
855
856 /* Draw bars indicating number of buffers allocated
857 * (not a great measure, easily confused)
858 */
859 if (dev_priv->dma_used) {
860 int bar = dev_priv->dma_used / 10240;
861 if (bar > 100)
862 bar = 100;
863 if (bar < 1)
864 bar = 1;
865 i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128);
866 dev_priv->dma_used = 0;
867 }
868
869 dev_priv->sarea_priv->perf_boxes = 0;
870}
871
872static void i830_dma_dispatch_clear(struct drm_device *dev, int flags,
873 unsigned int clear_color,
874 unsigned int clear_zval,
875 unsigned int clear_depthmask)
876{
877 drm_i830_private_t *dev_priv = dev->dev_private;
878 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
879 int nbox = sarea_priv->nbox;
880 struct drm_clip_rect *pbox = sarea_priv->boxes;
881 int pitch = dev_priv->pitch;
882 int cpp = dev_priv->cpp;
883 int i;
884 unsigned int BR13, CMD, D_CMD;
885 RING_LOCALS;
886
887 if (dev_priv->current_page == 1) {
888 unsigned int tmp = flags;
889
890 flags &= ~(I830_FRONT | I830_BACK);
891 if (tmp & I830_FRONT)
892 flags |= I830_BACK;
893 if (tmp & I830_BACK)
894 flags |= I830_FRONT;
895 }
896
897 i830_kernel_lost_context(dev);
898
899 switch (cpp) {
900 case 2:
901 BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
902 D_CMD = CMD = XY_COLOR_BLT_CMD;
903 break;
904 case 4:
905 BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
906 CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
907 XY_COLOR_BLT_WRITE_RGB);
908 D_CMD = XY_COLOR_BLT_CMD;
909 if (clear_depthmask & 0x00ffffff)
910 D_CMD |= XY_COLOR_BLT_WRITE_RGB;
911 if (clear_depthmask & 0xff000000)
912 D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
913 break;
914 default:
915 BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
916 D_CMD = CMD = XY_COLOR_BLT_CMD;
917 break;
918 }
919
920 if (nbox > I830_NR_SAREA_CLIPRECTS)
921 nbox = I830_NR_SAREA_CLIPRECTS;
922
923 for (i = 0; i < nbox; i++, pbox++) {
924 if (pbox->x1 > pbox->x2 ||
925 pbox->y1 > pbox->y2 ||
926 pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
927 continue;
928
929 if (flags & I830_FRONT) {
930 DRM_DEBUG("clear front\n");
931 BEGIN_LP_RING(6);
932 OUT_RING(CMD);
933 OUT_RING(BR13);
934 OUT_RING((pbox->y1 << 16) | pbox->x1);
935 OUT_RING((pbox->y2 << 16) | pbox->x2);
936 OUT_RING(dev_priv->front_offset);
937 OUT_RING(clear_color);
938 ADVANCE_LP_RING();
939 }
940
941 if (flags & I830_BACK) {
942 DRM_DEBUG("clear back\n");
943 BEGIN_LP_RING(6);
944 OUT_RING(CMD);
945 OUT_RING(BR13);
946 OUT_RING((pbox->y1 << 16) | pbox->x1);
947 OUT_RING((pbox->y2 << 16) | pbox->x2);
948 OUT_RING(dev_priv->back_offset);
949 OUT_RING(clear_color);
950 ADVANCE_LP_RING();
951 }
952
953 if (flags & I830_DEPTH) {
954 DRM_DEBUG("clear depth\n");
955 BEGIN_LP_RING(6);
956 OUT_RING(D_CMD);
957 OUT_RING(BR13);
958 OUT_RING((pbox->y1 << 16) | pbox->x1);
959 OUT_RING((pbox->y2 << 16) | pbox->x2);
960 OUT_RING(dev_priv->depth_offset);
961 OUT_RING(clear_zval);
962 ADVANCE_LP_RING();
963 }
964 }
965}
966
967static void i830_dma_dispatch_swap(struct drm_device *dev)
968{
969 drm_i830_private_t *dev_priv = dev->dev_private;
970 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
971 int nbox = sarea_priv->nbox;
972 struct drm_clip_rect *pbox = sarea_priv->boxes;
973 int pitch = dev_priv->pitch;
974 int cpp = dev_priv->cpp;
975 int i;
976 unsigned int CMD, BR13;
977 RING_LOCALS;
978
979 DRM_DEBUG("swapbuffers\n");
980
981 i830_kernel_lost_context(dev);
982
983 if (dev_priv->do_boxes)
984 i830_cp_performance_boxes(dev);
985
986 switch (cpp) {
987 case 2:
988 BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
989 CMD = XY_SRC_COPY_BLT_CMD;
990 break;
991 case 4:
992 BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
993 CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
994 XY_SRC_COPY_BLT_WRITE_RGB);
995 break;
996 default:
997 BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
998 CMD = XY_SRC_COPY_BLT_CMD;
999 break;
1000 }
1001
1002 if (nbox > I830_NR_SAREA_CLIPRECTS)
1003 nbox = I830_NR_SAREA_CLIPRECTS;
1004
1005 for (i = 0; i < nbox; i++, pbox++) {
1006 if (pbox->x1 > pbox->x2 ||
1007 pbox->y1 > pbox->y2 ||
1008 pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
1009 continue;
1010
1011 DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
1012 pbox->x1, pbox->y1, pbox->x2, pbox->y2);
1013
1014 BEGIN_LP_RING(8);
1015 OUT_RING(CMD);
1016 OUT_RING(BR13);
1017 OUT_RING((pbox->y1 << 16) | pbox->x1);
1018 OUT_RING((pbox->y2 << 16) | pbox->x2);
1019
1020 if (dev_priv->current_page == 0)
1021 OUT_RING(dev_priv->front_offset);
1022 else
1023 OUT_RING(dev_priv->back_offset);
1024
1025 OUT_RING((pbox->y1 << 16) | pbox->x1);
1026 OUT_RING(BR13 & 0xffff);
1027
1028 if (dev_priv->current_page == 0)
1029 OUT_RING(dev_priv->back_offset);
1030 else
1031 OUT_RING(dev_priv->front_offset);
1032
1033 ADVANCE_LP_RING();
1034 }
1035}
1036
1037static void i830_dma_dispatch_flip(struct drm_device *dev)
1038{
1039 drm_i830_private_t *dev_priv = dev->dev_private;
1040 RING_LOCALS;
1041
1042 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
1043 __func__,
1044 dev_priv->current_page,
1045 dev_priv->sarea_priv->pf_current_page);
1046
1047 i830_kernel_lost_context(dev);
1048
1049 if (dev_priv->do_boxes) {
1050 dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
1051 i830_cp_performance_boxes(dev);
1052 }
1053
1054 BEGIN_LP_RING(2);
1055 OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
1056 OUT_RING(0);
1057 ADVANCE_LP_RING();
1058
1059 BEGIN_LP_RING(6);
1060 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
1061 OUT_RING(0);
1062 if (dev_priv->current_page == 0) {
1063 OUT_RING(dev_priv->back_offset);
1064 dev_priv->current_page = 1;
1065 } else {
1066 OUT_RING(dev_priv->front_offset);
1067 dev_priv->current_page = 0;
1068 }
1069 OUT_RING(0);
1070 ADVANCE_LP_RING();
1071
1072 BEGIN_LP_RING(2);
1073 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
1074 OUT_RING(0);
1075 ADVANCE_LP_RING();
1076
1077 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
1078}
1079
1080static void i830_dma_dispatch_vertex(struct drm_device *dev,
1081 struct drm_buf *buf, int discard, int used)
1082{
1083 drm_i830_private_t *dev_priv = dev->dev_private;
1084 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
1085 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
1086 struct drm_clip_rect *box = sarea_priv->boxes;
1087 int nbox = sarea_priv->nbox;
1088 unsigned long address = (unsigned long)buf->bus_address;
1089 unsigned long start = address - dev->agp->base;
1090 int i = 0, u;
1091 RING_LOCALS;
1092
1093 i830_kernel_lost_context(dev);
1094
1095 if (nbox > I830_NR_SAREA_CLIPRECTS)
1096 nbox = I830_NR_SAREA_CLIPRECTS;
1097
1098 if (discard) {
1099 u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
1100 I830_BUF_HARDWARE);
1101 if (u != I830_BUF_CLIENT)
1102 DRM_DEBUG("xxxx 2\n");
1103 }
1104
1105 if (used > 4 * 1023)
1106 used = 0;
1107
1108 if (sarea_priv->dirty)
1109 i830EmitState(dev);
1110
1111 DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
1112 address, used, nbox);
1113
1114 dev_priv->counter++;
1115 DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
1116 DRM_DEBUG("i830_dma_dispatch\n");
1117 DRM_DEBUG("start : %lx\n", start);
1118 DRM_DEBUG("used : %d\n", used);
1119 DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
1120
1121 if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
1122 u32 *vp = buf_priv->kernel_virtual;
1123
1124 vp[0] = (GFX_OP_PRIMITIVE |
1125 sarea_priv->vertex_prim | ((used / 4) - 2));
1126
1127 if (dev_priv->use_mi_batchbuffer_start) {
1128 vp[used / 4] = MI_BATCH_BUFFER_END;
1129 used += 4;
1130 }
1131
1132 if (used & 4) {
1133 vp[used / 4] = 0;
1134 used += 4;
1135 }
1136
1137 i830_unmap_buffer(buf);
1138 }
1139
1140 if (used) {
1141 do {
1142 if (i < nbox) {
1143 BEGIN_LP_RING(6);
1144 OUT_RING(GFX_OP_DRAWRECT_INFO);
1145 OUT_RING(sarea_priv->
1146 BufferState[I830_DESTREG_DR1]);
1147 OUT_RING(box[i].x1 | (box[i].y1 << 16));
1148 OUT_RING(box[i].x2 | (box[i].y2 << 16));
1149 OUT_RING(sarea_priv->
1150 BufferState[I830_DESTREG_DR4]);
1151 OUT_RING(0);
1152 ADVANCE_LP_RING();
1153 }
1154
1155 if (dev_priv->use_mi_batchbuffer_start) {
1156 BEGIN_LP_RING(2);
1157 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
1158 OUT_RING(start | MI_BATCH_NON_SECURE);
1159 ADVANCE_LP_RING();
1160 } else {
1161 BEGIN_LP_RING(4);
1162 OUT_RING(MI_BATCH_BUFFER);
1163 OUT_RING(start | MI_BATCH_NON_SECURE);
1164 OUT_RING(start + used - 4);
1165 OUT_RING(0);
1166 ADVANCE_LP_RING();
1167 }
1168
1169 } while (++i < nbox);
1170 }
1171
1172 if (discard) {
1173 dev_priv->counter++;
1174
1175 (void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
1176 I830_BUF_HARDWARE);
1177
1178 BEGIN_LP_RING(8);
1179 OUT_RING(CMD_STORE_DWORD_IDX);
1180 OUT_RING(20);
1181 OUT_RING(dev_priv->counter);
1182 OUT_RING(CMD_STORE_DWORD_IDX);
1183 OUT_RING(buf_priv->my_use_idx);
1184 OUT_RING(I830_BUF_FREE);
1185 OUT_RING(CMD_REPORT_HEAD);
1186 OUT_RING(0);
1187 ADVANCE_LP_RING();
1188 }
1189}
1190
1191static void i830_dma_quiescent(struct drm_device *dev)
1192{
1193 drm_i830_private_t *dev_priv = dev->dev_private;
1194 RING_LOCALS;
1195
1196 i830_kernel_lost_context(dev);
1197
1198 BEGIN_LP_RING(4);
1199 OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
1200 OUT_RING(CMD_REPORT_HEAD);
1201 OUT_RING(0);
1202 OUT_RING(0);
1203 ADVANCE_LP_RING();
1204
1205 i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
1206}
1207
1208static int i830_flush_queue(struct drm_device *dev)
1209{
1210 drm_i830_private_t *dev_priv = dev->dev_private;
1211 struct drm_device_dma *dma = dev->dma;
1212 int i, ret = 0;
1213 RING_LOCALS;
1214
1215 i830_kernel_lost_context(dev);
1216
1217 BEGIN_LP_RING(2);
1218 OUT_RING(CMD_REPORT_HEAD);
1219 OUT_RING(0);
1220 ADVANCE_LP_RING();
1221
1222 i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
1223
1224 for (i = 0; i < dma->buf_count; i++) {
1225 struct drm_buf *buf = dma->buflist[i];
1226 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
1227
1228 int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
1229 I830_BUF_FREE);
1230
1231 if (used == I830_BUF_HARDWARE)
1232 DRM_DEBUG("reclaimed from HARDWARE\n");
1233 if (used == I830_BUF_CLIENT)
1234 DRM_DEBUG("still on client\n");
1235 }
1236
1237 return ret;
1238}
1239
1240/* Must be called with the lock held */
1241static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1242{
1243 struct drm_device_dma *dma = dev->dma;
1244 int i;
1245
1246 if (!dma)
1247 return;
1248 if (!dev->dev_private)
1249 return;
1250 if (!dma->buflist)
1251 return;
1252
1253 i830_flush_queue(dev);
1254
1255 for (i = 0; i < dma->buf_count; i++) {
1256 struct drm_buf *buf = dma->buflist[i];
1257 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
1258
1259 if (buf->file_priv == file_priv && buf_priv) {
1260 int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
1261 I830_BUF_FREE);
1262
1263 if (used == I830_BUF_CLIENT)
1264 DRM_DEBUG("reclaimed from client\n");
1265 if (buf_priv->currently_mapped == I830_BUF_MAPPED)
1266 buf_priv->currently_mapped = I830_BUF_UNMAPPED;
1267 }
1268 }
1269}
1270
1271static int i830_flush_ioctl(struct drm_device *dev, void *data,
1272 struct drm_file *file_priv)
1273{
1274 LOCK_TEST_WITH_RETURN(dev, file_priv);
1275
1276 i830_flush_queue(dev);
1277 return 0;
1278}
1279
1280static int i830_dma_vertex(struct drm_device *dev, void *data,
1281 struct drm_file *file_priv)
1282{
1283 struct drm_device_dma *dma = dev->dma;
1284 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1285 u32 *hw_status = dev_priv->hw_status_page;
1286 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1287 dev_priv->sarea_priv;
1288 drm_i830_vertex_t *vertex = data;
1289
1290 LOCK_TEST_WITH_RETURN(dev, file_priv);
1291
1292 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
1293 vertex->idx, vertex->used, vertex->discard);
1294
1295 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
1296 return -EINVAL;
1297
1298 i830_dma_dispatch_vertex(dev,
1299 dma->buflist[vertex->idx],
1300 vertex->discard, vertex->used);
1301
1302 sarea_priv->last_enqueue = dev_priv->counter - 1;
1303 sarea_priv->last_dispatch = (int)hw_status[5];
1304
1305 return 0;
1306}
1307
1308static int i830_clear_bufs(struct drm_device *dev, void *data,
1309 struct drm_file *file_priv)
1310{
1311 drm_i830_clear_t *clear = data;
1312
1313 LOCK_TEST_WITH_RETURN(dev, file_priv);
1314
1315 /* GH: Someone's doing nasty things... */
1316 if (!dev->dev_private)
1317 return -EINVAL;
1318
1319 i830_dma_dispatch_clear(dev, clear->flags,
1320 clear->clear_color,
1321 clear->clear_depth, clear->clear_depthmask);
1322 return 0;
1323}
1324
1325static int i830_swap_bufs(struct drm_device *dev, void *data,
1326 struct drm_file *file_priv)
1327{
1328 DRM_DEBUG("i830_swap_bufs\n");
1329
1330 LOCK_TEST_WITH_RETURN(dev, file_priv);
1331
1332 i830_dma_dispatch_swap(dev);
1333 return 0;
1334}
1335
1336/* Not sure why this isn't set all the time:
1337 */
1338static void i830_do_init_pageflip(struct drm_device *dev)
1339{
1340 drm_i830_private_t *dev_priv = dev->dev_private;
1341
1342 DRM_DEBUG("%s\n", __func__);
1343 dev_priv->page_flipping = 1;
1344 dev_priv->current_page = 0;
1345 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
1346}
1347
1348static int i830_do_cleanup_pageflip(struct drm_device *dev)
1349{
1350 drm_i830_private_t *dev_priv = dev->dev_private;
1351
1352 DRM_DEBUG("%s\n", __func__);
1353 if (dev_priv->current_page != 0)
1354 i830_dma_dispatch_flip(dev);
1355
1356 dev_priv->page_flipping = 0;
1357 return 0;
1358}
1359
1360static int i830_flip_bufs(struct drm_device *dev, void *data,
1361 struct drm_file *file_priv)
1362{
1363 drm_i830_private_t *dev_priv = dev->dev_private;
1364
1365 DRM_DEBUG("%s\n", __func__);
1366
1367 LOCK_TEST_WITH_RETURN(dev, file_priv);
1368
1369 if (!dev_priv->page_flipping)
1370 i830_do_init_pageflip(dev);
1371
1372 i830_dma_dispatch_flip(dev);
1373 return 0;
1374}
1375
1376static int i830_getage(struct drm_device *dev, void *data,
1377 struct drm_file *file_priv)
1378{
1379 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1380 u32 *hw_status = dev_priv->hw_status_page;
1381 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1382 dev_priv->sarea_priv;
1383
1384 sarea_priv->last_dispatch = (int)hw_status[5];
1385 return 0;
1386}
1387
1388static int i830_getbuf(struct drm_device *dev, void *data,
1389 struct drm_file *file_priv)
1390{
1391 int retcode = 0;
1392 drm_i830_dma_t *d = data;
1393 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1394 u32 *hw_status = dev_priv->hw_status_page;
1395 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1396 dev_priv->sarea_priv;
1397
1398 DRM_DEBUG("getbuf\n");
1399
1400 LOCK_TEST_WITH_RETURN(dev, file_priv);
1401
1402 d->granted = 0;
1403
1404 retcode = i830_dma_get_buffer(dev, d, file_priv);
1405
1406 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
1407 task_pid_nr(current), retcode, d->granted);
1408
1409 sarea_priv->last_dispatch = (int)hw_status[5];
1410
1411 return retcode;
1412}
1413
1414static int i830_copybuf(struct drm_device *dev, void *data,
1415 struct drm_file *file_priv)
1416{
1417 /* Never copy - 2.4.x doesn't need it */
1418 return 0;
1419}
1420
1421static int i830_docopy(struct drm_device *dev, void *data,
1422 struct drm_file *file_priv)
1423{
1424 return 0;
1425}
1426
1427static int i830_getparam(struct drm_device *dev, void *data,
1428 struct drm_file *file_priv)
1429{
1430 drm_i830_private_t *dev_priv = dev->dev_private;
1431 drm_i830_getparam_t *param = data;
1432 int value;
1433
1434 if (!dev_priv) {
1435 DRM_ERROR("%s called with no initialization\n", __func__);
1436 return -EINVAL;
1437 }
1438
1439 switch (param->param) {
1440 case I830_PARAM_IRQ_ACTIVE:
1441 value = dev->irq_enabled;
1442 break;
1443 default:
1444 return -EINVAL;
1445 }
1446
1447 if (copy_to_user(param->value, &value, sizeof(int))) {
1448 DRM_ERROR("copy_to_user\n");
1449 return -EFAULT;
1450 }
1451
1452 return 0;
1453}
1454
1455static int i830_setparam(struct drm_device *dev, void *data,
1456 struct drm_file *file_priv)
1457{
1458 drm_i830_private_t *dev_priv = dev->dev_private;
1459 drm_i830_setparam_t *param = data;
1460
1461 if (!dev_priv) {
1462 DRM_ERROR("%s called with no initialization\n", __func__);
1463 return -EINVAL;
1464 }
1465
1466 switch (param->param) {
1467 case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
1468 dev_priv->use_mi_batchbuffer_start = param->value;
1469 break;
1470 default:
1471 return -EINVAL;
1472 }
1473
1474 return 0;
1475}
1476
1477int i830_driver_load(struct drm_device *dev, unsigned long flags)
1478{
1479 /* i830 has 4 more counters */
1480 dev->counters += 4;
1481 dev->types[6] = _DRM_STAT_IRQ;
1482 dev->types[7] = _DRM_STAT_PRIMARY;
1483 dev->types[8] = _DRM_STAT_SECONDARY;
1484 dev->types[9] = _DRM_STAT_DMA;
1485
1486 return 0;
1487}
1488
1489void i830_driver_lastclose(struct drm_device *dev)
1490{
1491 i830_dma_cleanup(dev);
1492}
1493
1494void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1495{
1496 if (dev->dev_private) {
1497 drm_i830_private_t *dev_priv = dev->dev_private;
1498 if (dev_priv->page_flipping)
1499 i830_do_cleanup_pageflip(dev);
1500 }
1501}
1502
1503void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv)
1504{
1505 i830_reclaim_buffers(dev, file_priv);
1506}
1507
1508int i830_driver_dma_quiescent(struct drm_device *dev)
1509{
1510 i830_dma_quiescent(dev);
1511 return 0;
1512}
1513
1514/*
1515 * call drm_ioctl under the big kernel lock in order
1516 * to lock against the i830_mmap_buffers function.
1517 */
1518long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1519{
1520 int ret;
1521 lock_kernel();
1522 ret = drm_ioctl(file, cmd, arg);
1523 unlock_kernel();
1524 return ret;
1525}
1526
1527struct drm_ioctl_desc i830_ioctls[] = {
1528 DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1529 DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
1530 DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
1531 DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
1532 DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
1533 DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
1534 DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
1535 DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
1536 DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
1537 DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
1538 DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
1539 DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
1540 DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
1541 DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
1542};
1543
1544int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
1545
1546/**
1547 * Determine if the device really is AGP or not.
1548 *
1549 * All Intel graphics chipsets are treated as AGP, even if they are really
1550 * PCI-e.
1551 *
1552 * \param dev The device to be tested.
1553 *
1554 * \returns
1555 * A value of 1 is always returned to indicate every i8xx is AGP.
1556 */
1557int i830_driver_device_is_agp(struct drm_device *dev)
1558{
1559 return 1;
1560}
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
deleted file mode 100644
index f655ab7977d..00000000000
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ /dev/null
@@ -1,107 +0,0 @@
1/* i830_drv.c -- I810 driver -*- linux-c -*-
2 * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Jeff Hartmann <jhartmann@valinux.com>
30 * Gareth Hughes <gareth@valinux.com>
31 * Abraham vd Merwe <abraham@2d3d.co.za>
32 * Keith Whitwell <keith@tungstengraphics.com>
33 */
34
35#include "drmP.h"
36#include "drm.h"
37#include "i830_drm.h"
38#include "i830_drv.h"
39
40#include "drm_pciids.h"
41
42static struct pci_device_id pciidlist[] = {
43 i830_PCI_IDS
44};
45
46static struct drm_driver driver = {
47 .driver_features =
48 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
49 DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
50#if USE_IRQS
51 .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
52#endif
53 .dev_priv_size = sizeof(drm_i830_buf_priv_t),
54 .load = i830_driver_load,
55 .lastclose = i830_driver_lastclose,
56 .preclose = i830_driver_preclose,
57 .device_is_agp = i830_driver_device_is_agp,
58 .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
59 .dma_quiescent = i830_driver_dma_quiescent,
60#if USE_IRQS
61 .irq_preinstall = i830_driver_irq_preinstall,
62 .irq_postinstall = i830_driver_irq_postinstall,
63 .irq_uninstall = i830_driver_irq_uninstall,
64 .irq_handler = i830_driver_irq_handler,
65#endif
66 .ioctls = i830_ioctls,
67 .fops = {
68 .owner = THIS_MODULE,
69 .open = drm_open,
70 .release = drm_release,
71 .unlocked_ioctl = i830_ioctl,
72 .mmap = drm_mmap,
73 .poll = drm_poll,
74 .fasync = drm_fasync,
75 .llseek = noop_llseek,
76 },
77
78 .pci_driver = {
79 .name = DRIVER_NAME,
80 .id_table = pciidlist,
81 },
82
83 .name = DRIVER_NAME,
84 .desc = DRIVER_DESC,
85 .date = DRIVER_DATE,
86 .major = DRIVER_MAJOR,
87 .minor = DRIVER_MINOR,
88 .patchlevel = DRIVER_PATCHLEVEL,
89};
90
91static int __init i830_init(void)
92{
93 driver.num_ioctls = i830_max_ioctl;
94 return drm_init(&driver);
95}
96
97static void __exit i830_exit(void)
98{
99 drm_exit(&driver);
100}
101
102module_init(i830_init);
103module_exit(i830_exit);
104
105MODULE_AUTHOR(DRIVER_AUTHOR);
106MODULE_DESCRIPTION(DRIVER_DESC);
107MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
deleted file mode 100644
index 0df1c720560..00000000000
--- a/drivers/gpu/drm/i830/i830_drv.h
+++ /dev/null
@@ -1,295 +0,0 @@
1/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 *
30 */
31
32#ifndef _I830_DRV_H_
33#define _I830_DRV_H_
34
35/* General customization:
36 */
37
38#define DRIVER_AUTHOR "VA Linux Systems Inc."
39
40#define DRIVER_NAME "i830"
41#define DRIVER_DESC "Intel 830M"
42#define DRIVER_DATE "20021108"
43
44/* Interface history:
45 *
46 * 1.1: Original.
47 * 1.2: ?
48 * 1.3: New irq emit/wait ioctls.
49 * New pageflip ioctl.
50 * New getparam ioctl.
51 * State for texunits 3&4 in sarea.
52 * New (alternative) layout for texture state.
53 */
54#define DRIVER_MAJOR 1
55#define DRIVER_MINOR 3
56#define DRIVER_PATCHLEVEL 2
57
58/* Driver will work either way: IRQ's save cpu time when waiting for
59 * the card, but are subject to subtle interactions between bios,
60 * hardware and the driver.
61 */
62/* XXX: Add vblank support? */
63#define USE_IRQS 0
64
65typedef struct drm_i830_buf_priv {
66 u32 *in_use;
67 int my_use_idx;
68 int currently_mapped;
69 void __user *virtual;
70 void *kernel_virtual;
71 drm_local_map_t map;
72} drm_i830_buf_priv_t;
73
74typedef struct _drm_i830_ring_buffer {
75 int tail_mask;
76 unsigned long Start;
77 unsigned long End;
78 unsigned long Size;
79 u8 *virtual_start;
80 int head;
81 int tail;
82 int space;
83 drm_local_map_t map;
84} drm_i830_ring_buffer_t;
85
86typedef struct drm_i830_private {
87 struct drm_local_map *sarea_map;
88 struct drm_local_map *mmio_map;
89
90 drm_i830_sarea_t *sarea_priv;
91 drm_i830_ring_buffer_t ring;
92
93 void *hw_status_page;
94 unsigned long counter;
95
96 dma_addr_t dma_status_page;
97
98 struct drm_buf *mmap_buffer;
99
100 u32 front_di1, back_di1, zi1;
101
102 int back_offset;
103 int depth_offset;
104 int front_offset;
105 int w, h;
106 int pitch;
107 int back_pitch;
108 int depth_pitch;
109 unsigned int cpp;
110
111 int do_boxes;
112 int dma_used;
113
114 int current_page;
115 int page_flipping;
116
117 wait_queue_head_t irq_queue;
118 atomic_t irq_received;
119 atomic_t irq_emitted;
120
121 int use_mi_batchbuffer_start;
122
123} drm_i830_private_t;
124
125long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
126extern struct drm_ioctl_desc i830_ioctls[];
127extern int i830_max_ioctl;
128
129/* i830_irq.c */
130extern int i830_irq_emit(struct drm_device *dev, void *data,
131 struct drm_file *file_priv);
132extern int i830_irq_wait(struct drm_device *dev, void *data,
133 struct drm_file *file_priv);
134
135extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
136extern void i830_driver_irq_preinstall(struct drm_device *dev);
137extern void i830_driver_irq_postinstall(struct drm_device *dev);
138extern void i830_driver_irq_uninstall(struct drm_device *dev);
139extern int i830_driver_load(struct drm_device *, unsigned long flags);
140extern void i830_driver_preclose(struct drm_device *dev,
141 struct drm_file *file_priv);
142extern void i830_driver_lastclose(struct drm_device *dev);
143extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev,
144 struct drm_file *file_priv);
145extern int i830_driver_dma_quiescent(struct drm_device *dev);
146extern int i830_driver_device_is_agp(struct drm_device *dev);
147
148#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
149#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
150#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
151#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
152
153#define I830_VERBOSE 0
154
155#define RING_LOCALS unsigned int outring, ringmask, outcount; \
156 volatile char *virt;
157
158#define BEGIN_LP_RING(n) do { \
159 if (I830_VERBOSE) \
160 printk("BEGIN_LP_RING(%d)\n", (n)); \
161 if (dev_priv->ring.space < n*4) \
162 i830_wait_ring(dev, n*4, __func__); \
163 outcount = 0; \
164 outring = dev_priv->ring.tail; \
165 ringmask = dev_priv->ring.tail_mask; \
166 virt = dev_priv->ring.virtual_start; \
167} while (0)
168
169#define OUT_RING(n) do { \
170 if (I830_VERBOSE) \
171 printk(" OUT_RING %x\n", (int)(n)); \
172 *(volatile unsigned int *)(virt + outring) = n; \
173 outcount++; \
174 outring += 4; \
175 outring &= ringmask; \
176} while (0)
177
178#define ADVANCE_LP_RING() do { \
179 if (I830_VERBOSE) \
180 printk("ADVANCE_LP_RING %x\n", outring); \
181 dev_priv->ring.tail = outring; \
182 dev_priv->ring.space -= outcount * 4; \
183 I830_WRITE(LP_RING + RING_TAIL, outring); \
184} while (0)
185
186extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller);
187
188#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
189#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
190#define CMD_REPORT_HEAD (7<<23)
191#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
192#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
193
194#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
195#define LOAD_TEXTURE_MAP0 (1<<11)
196
197#define INST_PARSER_CLIENT 0x00000000
198#define INST_OP_FLUSH 0x02000000
199#define INST_FLUSH_MAP_CACHE 0x00000001
200
201#define BB1_START_ADDR_MASK (~0x7)
202#define BB1_PROTECTED (1<<0)
203#define BB1_UNPROTECTED (0<<0)
204#define BB2_END_ADDR_MASK (~0x7)
205
206#define I830REG_HWSTAM 0x02098
207#define I830REG_INT_IDENTITY_R 0x020a4
208#define I830REG_INT_MASK_R 0x020a8
209#define I830REG_INT_ENABLE_R 0x020a0
210
211#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
212
213#define LP_RING 0x2030
214#define HP_RING 0x2040
215#define RING_TAIL 0x00
216#define TAIL_ADDR 0x001FFFF8
217#define RING_HEAD 0x04
218#define HEAD_WRAP_COUNT 0xFFE00000
219#define HEAD_WRAP_ONE 0x00200000
220#define HEAD_ADDR 0x001FFFFC
221#define RING_START 0x08
222#define START_ADDR 0xFFFFF000
223#define RING_LEN 0x0C
224#define RING_NR_PAGES 0x001FF000
225#define RING_REPORT_MASK 0x00000006
226#define RING_REPORT_64K 0x00000002
227#define RING_REPORT_128K 0x00000004
228#define RING_NO_REPORT 0x00000000
229#define RING_VALID_MASK 0x00000001
230#define RING_VALID 0x00000001
231#define RING_INVALID 0x00000000
232
233#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
234#define SC_UPDATE_SCISSOR (0x1<<1)
235#define SC_ENABLE_MASK (0x1<<0)
236#define SC_ENABLE (0x1<<0)
237
238#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
239#define SCI_YMIN_MASK (0xffff<<16)
240#define SCI_XMIN_MASK (0xffff<<0)
241#define SCI_YMAX_MASK (0xffff<<16)
242#define SCI_XMAX_MASK (0xffff<<0)
243
244#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
245#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
246#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
247#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
248#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
249#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
250#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
251#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
252
253#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
254
255#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
256#define ASYNC_FLIP (1<<22)
257
258#define CMD_3D (0x3<<29)
259#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16))
260#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
261
262#define BR00_BITBLT_CLIENT 0x40000000
263#define BR00_OP_COLOR_BLT 0x10000000
264#define BR00_OP_SRC_COPY_BLT 0x10C00000
265#define BR13_SOLID_PATTERN 0x80000000
266
267#define BUF_3D_ID_COLOR_BACK (0x3<<24)
268#define BUF_3D_ID_DEPTH (0x7<<24)
269#define BUF_3D_USE_FENCE (1<<23)
270#define BUF_3D_PITCH(x) (((x)/4)<<2)
271
272#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
273#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
274#define MAP_PALETTE_BOTH (1<<11)
275
276#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
277#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
278#define XY_COLOR_BLT_WRITE_RGB (1<<20)
279
280#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
281#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
282#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
283
284#define MI_BATCH_BUFFER ((0x30<<23)|1)
285#define MI_BATCH_BUFFER_START (0x31<<23)
286#define MI_BATCH_BUFFER_END (0xA<<23)
287#define MI_BATCH_NON_SECURE (1)
288
289#define MI_WAIT_FOR_EVENT ((0x3<<23))
290#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
291#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
292
293#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
294
295#endif
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
deleted file mode 100644
index d1a6b95d631..00000000000
--- a/drivers/gpu/drm/i830/i830_irq.c
+++ /dev/null
@@ -1,186 +0,0 @@
1/* i830_irq.c -- IRQ support for the I830 -*- linux-c -*-
2 *
3 * Copyright 2002 Tungsten Graphics, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 * Authors: Keith Whitwell <keith@tungstengraphics.com>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i830_drm.h"
32#include "i830_drv.h"
33#include <linux/interrupt.h> /* For task queue support */
34#include <linux/delay.h>
35
36irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
37{
38 struct drm_device *dev = (struct drm_device *) arg;
39 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
40 u16 temp;
41
42 temp = I830_READ16(I830REG_INT_IDENTITY_R);
43 DRM_DEBUG("%x\n", temp);
44
45 if (!(temp & 2))
46 return IRQ_NONE;
47
48 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
49
50 atomic_inc(&dev_priv->irq_received);
51 wake_up_interruptible(&dev_priv->irq_queue);
52
53 return IRQ_HANDLED;
54}
55
56static int i830_emit_irq(struct drm_device *dev)
57{
58 drm_i830_private_t *dev_priv = dev->dev_private;
59 RING_LOCALS;
60
61 DRM_DEBUG("%s\n", __func__);
62
63 atomic_inc(&dev_priv->irq_emitted);
64
65 BEGIN_LP_RING(2);
66 OUT_RING(0);
67 OUT_RING(GFX_OP_USER_INTERRUPT);
68 ADVANCE_LP_RING();
69
70 return atomic_read(&dev_priv->irq_emitted);
71}
72
73static int i830_wait_irq(struct drm_device *dev, int irq_nr)
74{
75 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
76 DECLARE_WAITQUEUE(entry, current);
77 unsigned long end = jiffies + HZ * 3;
78 int ret = 0;
79
80 DRM_DEBUG("%s\n", __func__);
81
82 if (atomic_read(&dev_priv->irq_received) >= irq_nr)
83 return 0;
84
85 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
86
87 add_wait_queue(&dev_priv->irq_queue, &entry);
88
89 for (;;) {
90 __set_current_state(TASK_INTERRUPTIBLE);
91 if (atomic_read(&dev_priv->irq_received) >= irq_nr)
92 break;
93 if ((signed)(end - jiffies) <= 0) {
94 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
95 I830_READ16(I830REG_INT_IDENTITY_R),
96 I830_READ16(I830REG_INT_MASK_R),
97 I830_READ16(I830REG_INT_ENABLE_R),
98 I830_READ16(I830REG_HWSTAM));
99
100 ret = -EBUSY; /* Lockup? Missed irq? */
101 break;
102 }
103 schedule_timeout(HZ * 3);
104 if (signal_pending(current)) {
105 ret = -EINTR;
106 break;
107 }
108 }
109
110 __set_current_state(TASK_RUNNING);
111 remove_wait_queue(&dev_priv->irq_queue, &entry);
112 return ret;
113}
114
115/* Needs the lock as it touches the ring.
116 */
117int i830_irq_emit(struct drm_device *dev, void *data,
118 struct drm_file *file_priv)
119{
120 drm_i830_private_t *dev_priv = dev->dev_private;
121 drm_i830_irq_emit_t *emit = data;
122 int result;
123
124 LOCK_TEST_WITH_RETURN(dev, file_priv);
125
126 if (!dev_priv) {
127 DRM_ERROR("%s called with no initialization\n", __func__);
128 return -EINVAL;
129 }
130
131 result = i830_emit_irq(dev);
132
133 if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
134 DRM_ERROR("copy_to_user\n");
135 return -EFAULT;
136 }
137
138 return 0;
139}
140
141/* Doesn't need the hardware lock.
142 */
143int i830_irq_wait(struct drm_device *dev, void *data,
144 struct drm_file *file_priv)
145{
146 drm_i830_private_t *dev_priv = dev->dev_private;
147 drm_i830_irq_wait_t *irqwait = data;
148
149 if (!dev_priv) {
150 DRM_ERROR("%s called with no initialization\n", __func__);
151 return -EINVAL;
152 }
153
154 return i830_wait_irq(dev, irqwait->irq_seq);
155}
156
157/* drm_dma.h hooks
158*/
159void i830_driver_irq_preinstall(struct drm_device *dev)
160{
161 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
162
163 I830_WRITE16(I830REG_HWSTAM, 0xffff);
164 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
165 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
166 atomic_set(&dev_priv->irq_received, 0);
167 atomic_set(&dev_priv->irq_emitted, 0);
168 init_waitqueue_head(&dev_priv->irq_queue);
169}
170
171void i830_driver_irq_postinstall(struct drm_device *dev)
172{
173 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
174
175 I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
176}
177
178void i830_driver_irq_uninstall(struct drm_device *dev)
179{
180 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
181 if (!dev_priv)
182 return;
183
184 I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
185 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
186}
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index ffec177f348..3a60ac88952 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -2,7 +2,6 @@ header-y += drm.h
2header-y += drm_mode.h 2header-y += drm_mode.h
3header-y += drm_sarea.h 3header-y += drm_sarea.h
4header-y += i810_drm.h 4header-y += i810_drm.h
5header-y += i830_drm.h
6header-y += i915_drm.h 5header-y += i915_drm.h
7header-y += mga_drm.h 6header-y += mga_drm.h
8header-y += nouveau_drm.h 7header-y += nouveau_drm.h
diff --git a/include/drm/i830_drm.h b/include/drm/i830_drm.h
deleted file mode 100644
index 61315c29b8f..00000000000
--- a/include/drm/i830_drm.h
+++ /dev/null
@@ -1,342 +0,0 @@
1#ifndef _I830_DRM_H_
2#define _I830_DRM_H_
3
4/* WARNING: These defines must be the same as what the Xserver uses.
5 * If you change them, you must change the defines in the Xserver.
6 *
7 * KW: Actually, you can't ever change them because doing so would
8 * break backwards compatibility.
9 */
10
11#ifndef _I830_DEFINES_
12#define _I830_DEFINES_
13
14#define I830_DMA_BUF_ORDER 12
15#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
16#define I830_DMA_BUF_NR 256
17#define I830_NR_SAREA_CLIPRECTS 8
18
19/* Each region is a minimum of 64k, and there are at most 64 of them.
20 */
21#define I830_NR_TEX_REGIONS 64
22#define I830_LOG_MIN_TEX_REGION_SIZE 16
23
24/* KW: These aren't correct but someone set them to two and then
25 * released the module. Now we can't change them as doing so would
26 * break backwards compatibility.
27 */
28#define I830_TEXTURE_COUNT 2
29#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT
30
31#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
32
33#define I830_UPLOAD_CTX 0x1
34#define I830_UPLOAD_BUFFERS 0x2
35#define I830_UPLOAD_CLIPRECTS 0x4
36#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
37#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
38#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
39#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
40#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
41#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
42#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
43#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
44#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
45#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
46#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
47#define I830_UPLOAD_TEX0 0x10000
48#define I830_UPLOAD_TEX1 0x20000
49#define I830_UPLOAD_TEX2 0x40000
50#define I830_UPLOAD_TEX3 0x80000
51#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
52#define I830_UPLOAD_TEX_MASK 0xf0000
53#define I830_UPLOAD_TEXBLEND0 0x100000
54#define I830_UPLOAD_TEXBLEND1 0x200000
55#define I830_UPLOAD_TEXBLEND2 0x400000
56#define I830_UPLOAD_TEXBLEND3 0x800000
57#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
58#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
59#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
60#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
61#define I830_UPLOAD_STIPPLE 0x8000000
62
63/* Indices into buf.Setup where various bits of state are mirrored per
64 * context and per buffer. These can be fired at the card as a unit,
65 * or in a piecewise fashion as required.
66 */
67
68/* Destbuffer state
69 * - backbuffer linear offset and pitch -- invariant in the current dri
70 * - zbuffer linear offset and pitch -- also invariant
71 * - drawing origin in back and depth buffers.
72 *
73 * Keep the depth/back buffer state here to accommodate private buffers
74 * in the future.
75 */
76
77#define I830_DESTREG_CBUFADDR 0
78#define I830_DESTREG_DBUFADDR 1
79#define I830_DESTREG_DV0 2
80#define I830_DESTREG_DV1 3
81#define I830_DESTREG_SENABLE 4
82#define I830_DESTREG_SR0 5
83#define I830_DESTREG_SR1 6
84#define I830_DESTREG_SR2 7
85#define I830_DESTREG_DR0 8
86#define I830_DESTREG_DR1 9
87#define I830_DESTREG_DR2 10
88#define I830_DESTREG_DR3 11
89#define I830_DESTREG_DR4 12
90#define I830_DEST_SETUP_SIZE 13
91
92/* Context state
93 */
94#define I830_CTXREG_STATE1 0
95#define I830_CTXREG_STATE2 1
96#define I830_CTXREG_STATE3 2
97#define I830_CTXREG_STATE4 3
98#define I830_CTXREG_STATE5 4
99#define I830_CTXREG_IALPHAB 5
100#define I830_CTXREG_STENCILTST 6
101#define I830_CTXREG_ENABLES_1 7
102#define I830_CTXREG_ENABLES_2 8
103#define I830_CTXREG_AA 9
104#define I830_CTXREG_FOGCOLOR 10
105#define I830_CTXREG_BLENDCOLR0 11
106#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
107#define I830_CTXREG_VF 13
108#define I830_CTXREG_VF2 14
109#define I830_CTXREG_MCSB0 15
110#define I830_CTXREG_MCSB1 16
111#define I830_CTX_SETUP_SIZE 17
112
113/* 1.3: Stipple state
114 */
115#define I830_STPREG_ST0 0
116#define I830_STPREG_ST1 1
117#define I830_STP_SETUP_SIZE 2
118
119/* Texture state (per tex unit)
120 */
121
122#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
123#define I830_TEXREG_MI1 1
124#define I830_TEXREG_MI2 2
125#define I830_TEXREG_MI3 3
126#define I830_TEXREG_MI4 4
127#define I830_TEXREG_MI5 5
128#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
129#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
130#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
131#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
132#define I830_TEX_SETUP_SIZE 10
133
134#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
135#define I830_TEXREG_TM0S0 1
136#define I830_TEXREG_TM0S1 2
137#define I830_TEXREG_TM0S2 3
138#define I830_TEXREG_TM0S3 4
139#define I830_TEXREG_TM0S4 5
140#define I830_TEXREG_NOP0 6 /* noop */
141#define I830_TEXREG_NOP1 7 /* noop */
142#define I830_TEXREG_NOP2 8 /* noop */
143#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
144#define __I830_TEX_SETUP_SIZE 10
145
146#define I830_FRONT 0x1
147#define I830_BACK 0x2
148#define I830_DEPTH 0x4
149
150#endif /* _I830_DEFINES_ */
151
152typedef struct _drm_i830_init {
153 enum {
154 I830_INIT_DMA = 0x01,
155 I830_CLEANUP_DMA = 0x02
156 } func;
157 unsigned int mmio_offset;
158 unsigned int buffers_offset;
159 int sarea_priv_offset;
160 unsigned int ring_start;
161 unsigned int ring_end;
162 unsigned int ring_size;
163 unsigned int front_offset;
164 unsigned int back_offset;
165 unsigned int depth_offset;
166 unsigned int w;
167 unsigned int h;
168 unsigned int pitch;
169 unsigned int pitch_bits;
170 unsigned int back_pitch;
171 unsigned int depth_pitch;
172 unsigned int cpp;
173} drm_i830_init_t;
174
175/* Warning: If you change the SAREA structure you must change the Xserver
176 * structure as well */
177
178typedef struct _drm_i830_tex_region {
179 unsigned char next, prev; /* indices to form a circular LRU */
180 unsigned char in_use; /* owned by a client, or free? */
181 int age; /* tracked by clients to update local LRU's */
182} drm_i830_tex_region_t;
183
184typedef struct _drm_i830_sarea {
185 unsigned int ContextState[I830_CTX_SETUP_SIZE];
186 unsigned int BufferState[I830_DEST_SETUP_SIZE];
187 unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
188 unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
189 unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
190 unsigned int Palette[2][256];
191 unsigned int dirty;
192
193 unsigned int nbox;
194 struct drm_clip_rect boxes[I830_NR_SAREA_CLIPRECTS];
195
196 /* Maintain an LRU of contiguous regions of texture space. If
197 * you think you own a region of texture memory, and it has an
198 * age different to the one you set, then you are mistaken and
199 * it has been stolen by another client. If global texAge
200 * hasn't changed, there is no need to walk the list.
201 *
202 * These regions can be used as a proxy for the fine-grained
203 * texture information of other clients - by maintaining them
204 * in the same lru which is used to age their own textures,
205 * clients have an approximate lru for the whole of global
206 * texture space, and can make informed decisions as to which
207 * areas to kick out. There is no need to choose whether to
208 * kick out your own texture or someone else's - simply eject
209 * them all in LRU order.
210 */
211
212 drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS + 1];
213 /* Last elt is the sentinel */
214 int texAge; /* last time texture was uploaded */
215 int last_enqueue; /* last time a buffer was enqueued */
216 int last_dispatch; /* age of the most recently dispatched buffer */
217 int last_quiescent; /* */
218 int ctxOwner; /* last context to upload state */
219
220 int vertex_prim;
221
222 int pf_enabled; /* is pageflipping allowed? */
223 int pf_active;
224 int pf_current_page; /* which buffer is being displayed? */
225
226 int perf_boxes; /* performance boxes to be displayed */
227
228 /* Here's the state for texunits 2,3:
229 */
230 unsigned int TexState2[I830_TEX_SETUP_SIZE];
231 unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
232 unsigned int TexBlendStateWordsUsed2;
233
234 unsigned int TexState3[I830_TEX_SETUP_SIZE];
235 unsigned int TexBlendState3[I830_TEXBLEND_SIZE];
236 unsigned int TexBlendStateWordsUsed3;
237
238 unsigned int StippleState[I830_STP_SETUP_SIZE];
239} drm_i830_sarea_t;
240
241/* Flags for perf_boxes
242 */
243#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
244#define I830_BOX_FLIP 0x2 /* populated by kernel */
245#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
246#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
247#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
248
249/* I830 specific ioctls
250 * The device specific ioctl range is 0x40 to 0x79.
251 */
252#define DRM_I830_INIT 0x00
253#define DRM_I830_VERTEX 0x01
254#define DRM_I830_CLEAR 0x02
255#define DRM_I830_FLUSH 0x03
256#define DRM_I830_GETAGE 0x04
257#define DRM_I830_GETBUF 0x05
258#define DRM_I830_SWAP 0x06
259#define DRM_I830_COPY 0x07
260#define DRM_I830_DOCOPY 0x08
261#define DRM_I830_FLIP 0x09
262#define DRM_I830_IRQ_EMIT 0x0a
263#define DRM_I830_IRQ_WAIT 0x0b
264#define DRM_I830_GETPARAM 0x0c
265#define DRM_I830_SETPARAM 0x0d
266
267#define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
268#define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
269#define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
270#define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
271#define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
272#define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
273#define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
274#define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
275#define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
276#define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
277#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
278#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
279#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
280#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
281
282typedef struct _drm_i830_clear {
283 int clear_color;
284 int clear_depth;
285 int flags;
286 unsigned int clear_colormask;
287 unsigned int clear_depthmask;
288} drm_i830_clear_t;
289
290/* These may be placeholders if we have more cliprects than
291 * I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
292 * false, indicating that the buffer will be dispatched again with a
293 * new set of cliprects.
294 */
295typedef struct _drm_i830_vertex {
296 int idx; /* buffer index */
297 int used; /* nr bytes in use */
298 int discard; /* client is finished with the buffer? */
299} drm_i830_vertex_t;
300
301typedef struct _drm_i830_copy_t {
302 int idx; /* buffer index */
303 int used; /* nr bytes in use */
304 void __user *address; /* Address to copy from */
305} drm_i830_copy_t;
306
307typedef struct drm_i830_dma {
308 void __user *virtual;
309 int request_idx;
310 int request_size;
311 int granted;
312} drm_i830_dma_t;
313
314/* 1.3: Userspace can request & wait on irq's:
315 */
316typedef struct drm_i830_irq_emit {
317 int __user *irq_seq;
318} drm_i830_irq_emit_t;
319
320typedef struct drm_i830_irq_wait {
321 int irq_seq;
322} drm_i830_irq_wait_t;
323
324/* 1.3: New ioctl to query kernel params:
325 */
326#define I830_PARAM_IRQ_ACTIVE 1
327
328typedef struct drm_i830_getparam {
329 int param;
330 int __user *value;
331} drm_i830_getparam_t;
332
333/* 1.3: New ioctl to set kernel params:
334 */
335#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
336
337typedef struct drm_i830_setparam {
338 int param;
339 int value;
340} drm_i830_setparam_t;
341
342#endif /* _I830_DRM_H_ */
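
For context on the uapi surface this patch removes: drm_i830_irq_emit_t and drm_i830_irq_wait_t were the 1.3-era way for a DRI client to ask the kernel to emit a user interrupt into the ring and block until the hardware reached it. The following is a minimal, hypothetical userspace sketch of that flow; it assumes the pre-removal <drm/drm.h> and <drm/i830_drm.h> headers, an i830-backed /dev/dri/card0, and that the client already holds the DRM hardware lock (i830_irq_emit is guarded by LOCK_TEST_WITH_RETURN). It is illustration only, not part of the patch.

/* Hypothetical sketch, not part of this patch: emit a user interrupt and
 * wait for it with the old 1.3 ioctls. Assumes the pre-removal headers
 * are installed and the caller already holds the DRM hardware lock.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/drm.h>       /* DRM_IOWR, DRM_COMMAND_BASE */
#include <drm/i830_drm.h>  /* header removed by this patch */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int seq = 0;

	if (fd < 0)
		return 1;

	drm_i830_irq_emit_t emit = { .irq_seq = &seq };
	drm_i830_irq_wait_t wait;

	/* Queue GFX_OP_USER_INTERRUPT in the ring; the kernel copies back
	 * the sequence number of the emitted interrupt. */
	if (ioctl(fd, DRM_IOCTL_I830_IRQ_EMIT, &emit) == 0) {
		wait.irq_seq = seq;
		/* Sleep until i830_driver_irq_handler() has seen that interrupt
		 * (or the kernel's three-second timeout fires with -EBUSY). */
		ioctl(fd, DRM_IOCTL_I830_IRQ_WAIT, &wait);
	}

	close(fd);
	return 0;
}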