aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/char/drm/i810_dma.c
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2008-05-28 20:09:59 -0400
committerDave Airlie <airlied@redhat.com>2008-07-13 20:45:01 -0400
commitc0e09200dc0813972442e550a5905a132768e56c (patch)
treed38e635a30ff8b0a2b98b9d7f97cab1501f8209e /drivers/char/drm/i810_dma.c
parentbce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
drm: reorganise drm tree to be more future proof.
With the coming of kernel based modesetting and the memory manager stuff, the everything in one directory approach was getting very ugly and starting to be unmanageable. This restructures the drm along the lines of other kernel components. It creates a drivers/gpu/drm directory and moves the hw drivers into subdirectores. It moves the includes into an include/drm, and sets up the unifdef for the userspace headers we should be exporting. Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/char/drm/i810_dma.c')
-rw-r--r--drivers/char/drm/i810_dma.c1283
1 files changed, 0 insertions, 1283 deletions
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
deleted file mode 100644
index e5de8ea41544..000000000000
--- a/drivers/char/drm/i810_dma.c
+++ /dev/null
@@ -1,1283 +0,0 @@
1/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 *
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "i810_drm.h"
36#include "i810_drv.h"
37#include <linux/interrupt.h> /* For task queue support */
38#include <linux/delay.h>
39#include <linux/pagemap.h>
40
41#define I810_BUF_FREE 2
42#define I810_BUF_CLIENT 1
43#define I810_BUF_HARDWARE 0
44
45#define I810_BUF_UNMAPPED 0
46#define I810_BUF_MAPPED 1
47
48static struct drm_buf *i810_freelist_get(struct drm_device * dev)
49{
50 struct drm_device_dma *dma = dev->dma;
51 int i;
52 int used;
53
54 /* Linear search might not be the best solution */
55
56 for (i = 0; i < dma->buf_count; i++) {
57 struct drm_buf *buf = dma->buflist[i];
58 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
59 /* In use is already a pointer */
60 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
61 I810_BUF_CLIENT);
62 if (used == I810_BUF_FREE) {
63 return buf;
64 }
65 }
66 return NULL;
67}
68
69/* This should only be called if the buffer is not sent to the hardware
70 * yet, the hardware updates in use for us once its on the ring buffer.
71 */
72
73static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
74{
75 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
76 int used;
77
78 /* In use is already a pointer */
79 used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
80 if (used != I810_BUF_CLIENT) {
81 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
82 return -EINVAL;
83 }
84
85 return 0;
86}
87
/* VMA mmap handler installed (via i810_buffer_fops) only for the window
 * in which i810_map_buffer() calls do_mmap(): it maps the buffer stashed
 * in dev_priv->mmap_buffer into the calling client's address space. */
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	drm_i810_private_t *dev_priv;
	struct drm_buf *buf;
	drm_i810_buf_priv_t *buf_priv;

	lock_kernel();
	dev = priv->minor->dev;
	dev_priv = dev->dev_private;
	/* Buffer chosen by i810_map_buffer() just before its do_mmap(). */
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
	vma->vm_file = filp;

	buf_priv->currently_mapped = I810_BUF_MAPPED;
	unlock_kernel();

	/* vm_pgoff carries the page frame of buf->bus_address, passed as
	 * the mmap offset by i810_map_buffer(). */
	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
114
/* File operations temporarily swapped in by i810_map_buffer() so that
 * its nested do_mmap() call is routed to i810_mmap_buffers() instead of
 * the default drm mmap handler. */
static const struct file_operations i810_buffer_fops = {
	.open = drm_open,
	.release = drm_release,
	.ioctl = drm_ioctl,
	.mmap = i810_mmap_buffers,
	.fasync = drm_fasync,
};
122
/* Map a DMA buffer into the calling client's address space.
 *
 * Temporarily replaces the file's f_op with i810_buffer_fops and
 * publishes the buffer via dev_priv->mmap_buffer so the nested
 * do_mmap() lands in i810_mmap_buffers().  The swap happens under
 * mmap_sem held for write, which serialises concurrent mappers.
 * Returns 0 on success or a negative errno. */
static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_private_t *dev_priv = dev->dev_private;
	const struct file_operations *old_fops;
	int retcode = 0;

	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	old_fops = file_priv->filp->f_op;
	file_priv->filp->f_op = &i810_buffer_fops;
	dev_priv->mmap_buffer = buf;
	/* bus_address as the offset: i810_mmap_buffers() remaps it. */
	buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
					    PROT_READ | PROT_WRITE,
					    MAP_SHARED, buf->bus_address);
	dev_priv->mmap_buffer = NULL;
	file_priv->filp->f_op = old_fops;
	if (IS_ERR(buf_priv->virtual)) {
		/* Real error */
		DRM_ERROR("mmap error\n");
		retcode = PTR_ERR(buf_priv->virtual);
		buf_priv->virtual = NULL;
	}
	up_write(&current->mm->mmap_sem);

	return retcode;
}
153
/* Undo i810_map_buffer(): unmap the buffer from the current task's
 * address space and reset its mapping state.  Returns -EINVAL if the
 * buffer was not mapped, otherwise do_munmap()'s result. */
static int i810_unmap_buffer(struct drm_buf * buf)
{
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	int retcode = 0;

	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	retcode = do_munmap(current->mm,
			    (unsigned long)buf_priv->virtual,
			    (size_t) buf->total);
	up_write(&current->mm->mmap_sem);

	/* State is reset even if do_munmap() failed; retcode reports it. */
	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
	buf_priv->virtual = NULL;

	return retcode;
}
173
174static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
175 struct drm_file *file_priv)
176{
177 struct drm_buf *buf;
178 drm_i810_buf_priv_t *buf_priv;
179 int retcode = 0;
180
181 buf = i810_freelist_get(dev);
182 if (!buf) {
183 retcode = -ENOMEM;
184 DRM_DEBUG("retcode=%d\n", retcode);
185 return retcode;
186 }
187
188 retcode = i810_map_buffer(buf, file_priv);
189 if (retcode) {
190 i810_freelist_put(dev, buf);
191 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
192 return retcode;
193 }
194 buf->file_priv = file_priv;
195 buf_priv = buf->dev_private;
196 d->granted = 1;
197 d->request_idx = buf->idx;
198 d->request_size = buf->total;
199 d->virtual = buf_priv->virtual;
200
201 return retcode;
202}
203
/* Release all DMA-related state: IRQ, ring mapping, hardware status
 * page, the driver-private structure, and each buffer's kernel mapping.
 * Safe on partially-initialised state — it is used both by the cleanup
 * ioctl and by the error paths in i810_dma_initialize(). */
static int i810_dma_cleanup(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		int i;
		drm_i810_private_t *dev_priv =
		    (drm_i810_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}
		if (dev_priv->hw_status_page) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
			/* Need to rewrite hardware status page */
			I810_WRITE(0x02080, 0x1ffff000);
		}
		drm_free(dev->dev_private, sizeof(drm_i810_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;

		/* Drop the kernel-side ioremap of each AGP buffer that
		 * i810_freelist_init() set up. */
		for (i = 0; i < dma->buf_count; i++) {
			struct drm_buf *buf = dma->buflist[i];
			drm_i810_buf_priv_t *buf_priv = buf->dev_private;

			if (buf_priv->kernel_virtual && buf->total)
				drm_core_ioremapfree(&buf_priv->map, dev);
		}
	}
	return 0;
}
244
/* Busy-wait until at least n bytes are free in the ring buffer.
 * The 3-second timeout restarts whenever the hardware head pointer
 * advances; if the head stalls for a full 3s we report a lockup and
 * give up.  Returns the number of polling iterations performed. */
static int i810_wait_ring(struct drm_device * dev, int n)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ * 3);
	while (ring->space < n) {
		ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		/* Free space is the head-to-tail gap (8-byte guard),
		 * wrapped by the ring size when negative. */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;

		/* Head moved: engine is making progress, restart timeout. */
		if (ring->head != last_head) {
			end = jiffies + (HZ * 3);
			last_head = ring->head;
		}

		iters++;
		if (time_before(end, jiffies)) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
		udelay(1);
	}

      out_wait_ring:
	return iters;
}
277
/* Resynchronise the software ring bookkeeping (head, tail, free space)
 * with the hardware registers; called before emitting commands in case
 * another context has touched the ring since we last looked. */
static void i810_kernel_lost_context(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I810_READ(LP_RING + RING_TAIL);
	/* Same space computation as i810_wait_ring(). */
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;
}
289
/* Initialise the buffer freelist: each DMA buffer gets a private 4-byte
 * slot in the hardware status page (starting at byte offset 24) that
 * holds its I810_BUF_* ownership state, plus a kernel ioremap of its
 * AGP backing store so the kernel can patch buffer contents.
 * Returns -EINVAL when the status page cannot hold one slot per buffer. */
static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int my_idx = 24;	/* first free slot; bytes 0-23 are used for other breadcrumbs */
	u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
	int i;

	/* 1019 slots of 4 bytes starting at offset 24 fill the 4K page. */
	if (dma->buf_count > 1019) {
		/* Not enough space in the status page for the freelist */
		return -EINVAL;
	}

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		buf_priv->in_use = hw_status++;
		buf_priv->my_use_idx = my_idx;
		my_idx += 4;

		*buf_priv->in_use = I810_BUF_FREE;

		buf_priv->map.offset = buf->bus_address;
		buf_priv->map.size = buf->total;
		buf_priv->map.type = _DRM_AGP;
		buf_priv->map.flags = 0;
		buf_priv->map.mtrr = 0;

		drm_core_ioremap(&buf_priv->map, dev);
		buf_priv->kernel_virtual = buf_priv->map.handle;

	}
	return 0;
}
324
/* One-shot DMA setup for the v1.4 init ioctl: locate the sarea, mmio
 * and DMA-buffer maps, ioremap the ring, allocate the hardware status
 * page, and build the buffer freelist.  Every failure path installs
 * dev_priv so i810_dma_cleanup() can tear down whatever was already
 * set up, then returns a negative errno. */
static int i810_dma_initialize(struct drm_device * dev,
			       drm_i810_private_t * dev_priv,
			       drm_i810_init_t * init)
{
	struct drm_map_list *r_list;
	memset(dev_priv, 0, sizeof(drm_i810_private_t));

	/* The sarea is the shared-memory map that carries the DRM lock. */
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
			dev_priv->sarea_map = r_list->map;
			break;
		}
	}
	if (!dev_priv->sarea_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	/* Driver-private area lives inside the sarea at a client-given offset. */
	dev_priv->sarea_priv = (drm_i810_sarea_t *)
	    ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = _DRM_AGP;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* NOTE(review): tail_mask assumes ring_size is a power of two —
	 * not validated here. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->front_offset = init->front_offset;

	dev_priv->overlay_offset = init->overlay_offset;
	dev_priv->overlay_physical = init->overlay_physical;

	/* Precomputed destination-info dwords used by the state verifier
	 * (i810EmitDestVerified) and the flip code. */
	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page =
	    pci_alloc_consistent(dev->pdev, PAGE_SIZE,
				 &dev_priv->dma_status_page);
	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Status page address register — see the matching write in
	 * i810_dma_cleanup(). */
	I810_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	/* Now we need to init our freelist */
	if (i810_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

	return 0;
}
431
432static int i810_dma_init(struct drm_device *dev, void *data,
433 struct drm_file *file_priv)
434{
435 drm_i810_private_t *dev_priv;
436 drm_i810_init_t *init = data;
437 int retcode = 0;
438
439 switch (init->func) {
440 case I810_INIT_DMA_1_4:
441 DRM_INFO("Using v1.4 init.\n");
442 dev_priv = drm_alloc(sizeof(drm_i810_private_t),
443 DRM_MEM_DRIVER);
444 if (dev_priv == NULL)
445 return -ENOMEM;
446 retcode = i810_dma_initialize(dev, dev_priv, init);
447 break;
448
449 case I810_CLEANUP_DMA:
450 DRM_INFO("DMA Cleanup\n");
451 retcode = i810_dma_cleanup(dev);
452 break;
453 default:
454 return -EINVAL;
455 }
456
457 return retcode;
458}
459
460/* Most efficient way to verify state for the i810 is as it is
461 * emitted. Non-conformant state is silently dropped.
462 *
463 * Use 'volatile' & local var tmp to force the emitted values to be
464 * identical to the verified ones.
465 */
466static void i810EmitContextVerified(struct drm_device * dev,
467 volatile unsigned int *code)
468{
469 drm_i810_private_t *dev_priv = dev->dev_private;
470 int i, j = 0;
471 unsigned int tmp;
472 RING_LOCALS;
473
474 BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
475
476 OUT_RING(GFX_OP_COLOR_FACTOR);
477 OUT_RING(code[I810_CTXREG_CF1]);
478
479 OUT_RING(GFX_OP_STIPPLE);
480 OUT_RING(code[I810_CTXREG_ST1]);
481
482 for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
483 tmp = code[i];
484
485 if ((tmp & (7 << 29)) == (3 << 29) &&
486 (tmp & (0x1f << 24)) < (0x1d << 24)) {
487 OUT_RING(tmp);
488 j++;
489 } else
490 printk("constext state dropped!!!\n");
491 }
492
493 if (j & 1)
494 OUT_RING(0);
495
496 ADVANCE_LP_RING();
497}
498
/* Emit client-supplied texture-unit state.  The GFX_OP_MAP_INFO packet
 * (first three dwords) is emitted verbatim; the remainder is validated
 * exactly like the context state and invalid dwords are dropped. */
static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int i, j = 0;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I810_TEX_SETUP_SIZE);

	OUT_RING(GFX_OP_MAP_INFO);
	OUT_RING(code[I810_TEXREG_MI1]);
	OUT_RING(code[I810_TEXREG_MI2]);
	OUT_RING(code[I810_TEXREG_MI3]);

	for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
		tmp = code[i];

		/* Accept only opcode class 3 with sub-opcode < 0x1d. */
		if ((tmp & (7 << 29)) == (3 << 29) &&
		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
			OUT_RING(tmp);
			j++;
		} else
			printk("texture state dropped!!!\n");
	}

	/* Pad to keep the ring qword aligned. */
	if (j & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();
}
529
/* Need to do some additional checking when setting the dest buffer:
 * the client's DI1 dword is only accepted if it matches the known
 * front- or back-buffer value computed at init time; depth-buffer info
 * and the drawing rectangle come from driver state / fixed opcodes. */
static void i810EmitDestVerified(struct drm_device * dev,
				 volatile unsigned int *code)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);

	tmp = code[I810_DESTREG_DI1];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		OUT_RING(CMD_OP_DESTBUFFER_INFO);
		OUT_RING(tmp);
	} else
		DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
			  tmp, dev_priv->front_di1, dev_priv->back_di1);

	/* invariant:
	 */
	OUT_RING(CMD_OP_Z_BUFFER_INFO);
	OUT_RING(dev_priv->zi1);

	OUT_RING(GFX_OP_DESTBUFFER_VARS);
	OUT_RING(code[I810_DESTREG_DV1]);

	OUT_RING(GFX_OP_DRAWRECT_INFO);
	OUT_RING(code[I810_DESTREG_DR1]);
	OUT_RING(code[I810_DESTREG_DR2]);
	OUT_RING(code[I810_DESTREG_DR3]);
	OUT_RING(code[I810_DESTREG_DR4]);
	OUT_RING(0);

	ADVANCE_LP_RING();
}
566
/* Upload whichever state blocks the sarea marks dirty (destination
 * buffers, context, and the two texture units), clearing each dirty
 * bit as its block is emitted. */
static void i810EmitState(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("%x\n", dirty);

	if (dirty & I810_UPLOAD_BUFFERS) {
		i810EmitDestVerified(dev, sarea_priv->BufferState);
		sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
	}

	if (dirty & I810_UPLOAD_CTX) {
		i810EmitContextVerified(dev, sarea_priv->ContextState);
		sarea_priv->dirty &= ~I810_UPLOAD_CTX;
	}

	if (dirty & I810_UPLOAD_TEX0) {
		i810EmitTexVerified(dev, sarea_priv->TexState[0]);
		sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
	}

	if (dirty & I810_UPLOAD_TEX1) {
		i810EmitTexVerified(dev, sarea_priv->TexState[1]);
		sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
	}
}
595
/* Clear the requested buffers (front / back / depth) within each sarea
 * cliprect using solid-fill color blits, skipping rectangles that are
 * malformed or exceed the screen dimensions.  need to verify. */
static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
				    unsigned int clear_color,
				    unsigned int clear_zval)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;		/* bytes per pixel */
	int i;
	RING_LOCALS;

	/* After a page flip the logical front/back roles are swapped, so
	 * swap the FRONT and BACK flags to hit the right buffer. */
	if (dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(I810_FRONT | I810_BACK);
		if (tmp & I810_FRONT)
			flags |= I810_BACK;
		if (tmp & I810_BACK)
			flags |= I810_FRONT;
	}

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		unsigned int x = pbox->x1;
		unsigned int y = pbox->y1;
		unsigned int width = (pbox->x2 - x) * cpp;
		unsigned int height = pbox->y2 - y;
		unsigned int start = y * pitch + x * cpp;

		/* Reject inverted or out-of-bounds rectangles. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		if (flags & I810_FRONT) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(start);
			OUT_RING(clear_color);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}

		if (flags & I810_BACK) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(dev_priv->back_offset + start);
			OUT_RING(clear_color);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}

		if (flags & I810_DEPTH) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(dev_priv->depth_offset + start);
			OUT_RING(clear_zval);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}
}
672
/* Copy each sarea cliprect from the back buffer to the front buffer
 * with a source-copy blit.  When current_page == 1 the buffers have
 * swapped roles, so source and destination are exchanged. */
static void i810_dma_dispatch_swap(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;		/* bytes per pixel */
	int i;
	RING_LOCALS;

	DRM_DEBUG("swapbuffers\n");

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		unsigned int w = pbox->x2 - pbox->x1;
		unsigned int h = pbox->y2 - pbox->y1;
		unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
		unsigned int start = dst;

		/* Reject inverted or out-of-bounds rectangles. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		BEGIN_LP_RING(6);
		OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
		OUT_RING(pitch | (0xCC << 16));
		OUT_RING((h << 16) | (w * cpp));
		/* Destination: whichever buffer is currently displayed. */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->front_offset + start);
		else
			OUT_RING(dev_priv->back_offset + start);
		OUT_RING(pitch);
		/* Source: the other buffer. */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->back_offset + start);
		else
			OUT_RING(dev_priv->front_offset + start);
		ADVANCE_LP_RING();
	}
}
718
/* Execute a client vertex buffer as a protected batch buffer, emitting
 * it once per cliprect (note the do/while: it runs once even when
 * nbox == 0).  If 'discard' is set, hand the buffer to the hardware and
 * emit breadcrumbs that mark it free again on completion. */
static void i810_dma_dispatch_vertex(struct drm_device * dev,
				     struct drm_buf * buf, int discard, int used)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_clip_rect *box = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int i = 0;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	/* Clamp oversized requests to "emit nothing". */
	if (used > 4 * 1024)
		used = 0;

	if (sarea_priv->dirty)
		i810EmitState(dev);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);

		/* Patch the primitive header into the buffer's first dword
		 * through the kernel mapping. */
		*(u32 *) buf_priv->kernel_virtual =
		    ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));

		/* Pad to a qword boundary with a nop dword. */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}

	if (used) {
		do {
			if (i < nbox) {
				BEGIN_LP_RING(4);
				OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
					 SC_ENABLE);
				OUT_RING(GFX_OP_SCISSOR_INFO);
				OUT_RING(box[i].x1 | (box[i].y1 << 16));
				OUT_RING((box[i].x2 -
					  1) | ((box[i].y2 - 1) << 16));
				ADVANCE_LP_RING();
			}

			BEGIN_LP_RING(4);
			OUT_RING(CMD_OP_BATCH_BUFFER);
			OUT_RING(start | BB1_PROTECTED);
			OUT_RING(start + used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();

		} while (++i < nbox);
	}

	if (discard) {
		dev_priv->counter++;

		(void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
			      I810_BUF_HARDWARE);

		/* Breadcrumbs: counter at status-page byte 20 (read back
		 * as hw_status[5] by the ioctls), then mark the buffer's
		 * own slot FREE once the hardware gets here. */
		BEGIN_LP_RING(8);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(20);
		OUT_RING(dev_priv->counter);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(buf_priv->my_use_idx);
		OUT_RING(I810_BUF_FREE);
		OUT_RING(CMD_REPORT_HEAD);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
798
/* Queue a page flip: flush the map cache, point the display front
 * buffer at the other surface, wait for the flip event, and publish
 * the new current page through the sarea. */
static void i810_dma_dispatch_flip(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int pitch = dev_priv->pitch;
	RING_LOCALS;

	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
	/* On i815 at least ASYNC is buggy */
	/* pitch<<5 is from 11.2.8 p158,
	   its the pitch / 8 then left shifted 8,
	   so (pitch >> 3) << 8 */
	OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
	/* Toggle which surface the display scans out of. */
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Increment the frame counter. The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

}
844
/* Flush caches and wait until the ring is (almost) fully drained,
 * i.e. the engine has gone idle. */
static void i810_dma_quiescent(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Waiting for Size - 8 free bytes == an empty ring. */
	i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
861
/* Drain the ring, then sweep the buffer list moving anything the
 * hardware has finished with from HARDWARE back to FREE.  Buffers
 * still owned by clients are left alone.  Always returns 0. */
static int i810_flush_queue(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	int i, ret = 0;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Wait for the ring to empty before reclaiming. */
	i810_wait_ring(dev, dev_priv->ring.Size - 8);

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
				   I810_BUF_FREE);

		if (used == I810_BUF_HARDWARE)
			DRM_DEBUG("reclaimed from HARDWARE\n");
		if (used == I810_BUF_CLIENT)
			DRM_DEBUG("still on client\n");
	}

	return ret;
}
893
/* Release every buffer owned by a departing client back to the free
 * list and clear its mapping state.  Must be called with the lock held. */
static void i810_reclaim_buffers(struct drm_device * dev,
				 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int i;

	/* Nothing to reclaim if DMA was never (fully) initialised. */
	if (!dma)
		return;
	if (!dev->dev_private)
		return;
	if (!dma->buflist)
		return;

	/* First let the hardware finish and release its buffers. */
	i810_flush_queue(dev);

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->file_priv == file_priv && buf_priv) {
			int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
					   I810_BUF_FREE);

			if (used == I810_BUF_CLIENT)
				DRM_DEBUG("reclaimed from client\n");
			if (buf_priv->currently_mapped == I810_BUF_MAPPED)
				buf_priv->currently_mapped = I810_BUF_UNMAPPED;
		}
	}
}
925
/* DRM ioctl: wait for outstanding DMA and reclaim finished buffers. */
static int i810_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i810_flush_queue(dev);
	return 0;
}
934
935static int i810_dma_vertex(struct drm_device *dev, void *data,
936 struct drm_file *file_priv)
937{
938 struct drm_device_dma *dma = dev->dma;
939 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
940 u32 *hw_status = dev_priv->hw_status_page;
941 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
942 dev_priv->sarea_priv;
943 drm_i810_vertex_t *vertex = data;
944
945 LOCK_TEST_WITH_RETURN(dev, file_priv);
946
947 DRM_DEBUG("idx %d used %d discard %d\n",
948 vertex->idx, vertex->used, vertex->discard);
949
950 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
951 return -EINVAL;
952
953 i810_dma_dispatch_vertex(dev,
954 dma->buflist[vertex->idx],
955 vertex->discard, vertex->used);
956
957 atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
958 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
959 sarea_priv->last_enqueue = dev_priv->counter - 1;
960 sarea_priv->last_dispatch = (int)hw_status[5];
961
962 return 0;
963}
964
965static int i810_clear_bufs(struct drm_device *dev, void *data,
966 struct drm_file *file_priv)
967{
968 drm_i810_clear_t *clear = data;
969
970 LOCK_TEST_WITH_RETURN(dev, file_priv);
971
972 /* GH: Someone's doing nasty things... */
973 if (!dev->dev_private) {
974 return -EINVAL;
975 }
976
977 i810_dma_dispatch_clear(dev, clear->flags,
978 clear->clear_color, clear->clear_depth);
979 return 0;
980}
981
/* DRM ioctl: blit the back buffer to the front within the current
 * cliprects. */
static int i810_swap_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i810_dma_dispatch_swap(dev);
	return 0;
}
992
/* DRM ioctl: refresh last_dispatch in the sarea from the hardware
 * status page breadcrumb (hw_status[5], written by the dispatch code). */
static int i810_getage(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
	    dev_priv->sarea_priv;

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}
1004
1005static int i810_getbuf(struct drm_device *dev, void *data,
1006 struct drm_file *file_priv)
1007{
1008 int retcode = 0;
1009 drm_i810_dma_t *d = data;
1010 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1011 u32 *hw_status = dev_priv->hw_status_page;
1012 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1013 dev_priv->sarea_priv;
1014
1015 LOCK_TEST_WITH_RETURN(dev, file_priv);
1016
1017 d->granted = 0;
1018
1019 retcode = i810_dma_get_buffer(dev, d, file_priv);
1020
1021 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1022 task_pid_nr(current), retcode, d->granted);
1023
1024 sarea_priv->last_dispatch = (int)hw_status[5];
1025
1026 return retcode;
1027}
1028
/* DRM ioctl stub kept for ABI compatibility: copying was only needed on
 * 2.4.x kernels. */
static int i810_copybuf(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
1035
/* DRM ioctl stub kept for ABI compatibility: copying was only needed on
 * 2.4.x kernels. */
static int i810_docopy(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
1042
/* Execute a client command ("mc") buffer as a protected batch buffer,
 * then emit breadcrumbs that free the buffer and record last_render
 * (status-page byte 16, i.e. hw_status[4], read by i810_rstatus()). */
static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
				 unsigned int last_render)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int u;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	/* Take ownership from the client; warn if it wasn't theirs. */
	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
	if (u != I810_BUF_CLIENT) {
		DRM_DEBUG("MC found buffer that isn't mine!\n");
	}

	/* Clamp oversized requests to "emit nothing". */
	if (used > 4 * 1024)
		used = 0;

	/* Force a full state re-upload on the next vertex dispatch. */
	sarea_priv->dirty = 0x7f;

	DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);

	dev_priv->counter++;
	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG("start : %lx\n", start);
	DRM_DEBUG("used : %d\n", used);
	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		/* Pad to a qword boundary with a nop dword. */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}
	BEGIN_LP_RING(4);
	OUT_RING(CMD_OP_BATCH_BUFFER);
	OUT_RING(start | BB1_PROTECTED);
	OUT_RING(start + used - 4);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(8);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(buf_priv->my_use_idx);
	OUT_RING(I810_BUF_FREE);
	OUT_RING(0);

	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(16);
	OUT_RING(last_render);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
1101
/* DRM_I810_MC ioctl: validate the userspace request, dispatch the
 * referenced batch buffer, and update the DRM statistics counters and
 * the shared-area bookkeeping.
 *
 * Returns 0 on success, -EINVAL for an out-of-range buffer index
 * (LOCK_TEST_WITH_RETURN may also return early if the caller does not
 * hold the DRM lock).
 */
static int i810_dma_mc(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i810_mc_t *mc = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Reject out-of-range buffer indices coming from userspace. */
	if (mc->idx >= dma->buf_count || mc->idx < 0)
		return -EINVAL;

	i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
			     mc->last_render);

	/* Account this submission in the driver statistics. */
	atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
	sarea_priv->last_enqueue = dev_priv->counter - 1;
	/* Mirror status-page word 5 into the sarea as the last-dispatch
	 * stamp (same convention as the other ioctl handlers here). */
	sarea_priv->last_dispatch = (int)hw_status[5];

	return 0;
}
1127
1128static int i810_rstatus(struct drm_device *dev, void *data,
1129 struct drm_file *file_priv)
1130{
1131 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1132
1133 return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
1134}
1135
1136static int i810_ov0_info(struct drm_device *dev, void *data,
1137 struct drm_file *file_priv)
1138{
1139 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1140 drm_i810_overlay_t *ov = data;
1141
1142 ov->offset = dev_priv->overlay_offset;
1143 ov->physical = dev_priv->overlay_physical;
1144
1145 return 0;
1146}
1147
/* DRM_I810_FSTATUS ioctl: return the raw contents of hardware register
 * 0x30008 (overlay flip status — TODO confirm register semantics against
 * the i810 programmer's reference).
 */
static int i810_fstatus(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, file_priv);
	return I810_READ(0x30008);
}
1156
/* DRM_I810_OV0FLIP ioctl: trigger an overlay update by writing the
 * overlay's physical address to register 0x30000.
 */
static int i810_ov0_flip(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Tell the overlay to update: the address is OR'd with bit 31
	 * (presumably a "load new registers" flag — TODO confirm). */
	I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);

	return 0;
}
1169
/* Enable page flipping: start on page 0 and publish the current page to
 * the shared area.  (Original author's note, preserved: "Not sure why
 * this isn't set all the time".)
 */
static void i810_do_init_pageflip(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");
	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
1181
/* Disable page flipping.  If we are not currently on page 0, dispatch
 * one final flip first so the display is left on page 0.
 *
 * Always returns 0.
 */
static int i810_do_cleanup_pageflip(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");
	if (dev_priv->current_page != 0)
		i810_dma_dispatch_flip(dev);

	dev_priv->page_flipping = 0;
	return 0;
}
1193
/* DRM_I810_FLIP ioctl: perform a page flip, lazily enabling the
 * page-flipping machinery on first use.  Always returns 0 once the
 * lock test passes.
 */
static int i810_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* First flip request turns page flipping on. */
	if (!dev_priv->page_flipping)
		i810_do_init_pageflip(dev);

	i810_dma_dispatch_flip(dev);
	return 0;
}
1209
/* Driver load hook: extend the generic DRM statistics with the four
 * i810-specific counters, registered in type slots 6 through 9.
 *
 * Always returns 0.
 */
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
	/* i810 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}
1221
/* Last-close hook: tear down all i810 DMA state once the final file
 * handle on the device is closed. */
void i810_driver_lastclose(struct drm_device * dev)
{
	i810_dma_cleanup(dev);
}
1226
1227void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1228{
1229 if (dev->dev_private) {
1230 drm_i810_private_t *dev_priv = dev->dev_private;
1231 if (dev_priv->page_flipping) {
1232 i810_do_cleanup_pageflip(dev);
1233 }
1234 }
1235}
1236
/* Reclaim-buffers hook (the "_locked" suffix indicates it runs with the
 * DRM lock held — per DRM driver convention): hand this client's buffers
 * back via the i810-specific reclaim path. */
void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
					struct drm_file *file_priv)
{
	i810_reclaim_buffers(dev, file_priv);
}
1242
/* DMA-quiescent hook: delegate to i810_dma_quiescent() and report
 * success unconditionally. */
int i810_driver_dma_quiescent(struct drm_device * dev)
{
	i810_dma_quiescent(dev);
	return 0;
}
1248
/* Ioctl dispatch table for the i810 driver.  INIT and MC are restricted
 * to an authenticated master running as root; every other entry requires
 * only DRM authentication. */
struct drm_ioctl_desc i810_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
};

/* Number of entries in i810_ioctls, exported to the DRM core. */
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
1268
1269/**
1270 * Determine if the device really is AGP or not.
1271 *
1272 * All Intel graphics chipsets are treated as AGP, even if they are really
1273 * PCI-e.
1274 *
1275 * \param dev The device to be tested.
1276 *
1277 * \returns
1278 * A value of 1 is always retured to indictate every i810 is AGP.
1279 */
1280int i810_driver_device_is_agp(struct drm_device * dev)
1281{
1282 return 1;
1283}