aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--drivers/gpu/drm/i915/Makefile10
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c858
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c605
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1142
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c222
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c623
-rw-r--r--drivers/gpu/drm/i915/i915_mem.c386
7 files changed, 3846 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
new file mode 100644
index 000000000000..a9e60464df74
--- /dev/null
+++ b/drivers/gpu/drm/i915/Makefile
@@ -0,0 +1,10 @@
#
# Makefile for the drm device driver.  This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

# Pick up the DRM core headers.
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o

# 32-bit compat ioctl layer is only built when CONFIG_COMPAT is set.
i915-$(CONFIG_COMPAT) += i915_ioc32.o

obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
new file mode 100644
index 000000000000..88974342933c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -0,0 +1,858 @@
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
/* Spin until at least n bytes are free in the low-priority ring.
 * 'caller' is carried for diagnostics only (unused here).
 * Returns 0 on success, -EBUSY if the ring head stops advancing. */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		/* Free space = head - (tail + 8 bytes of slack), modulo ring size. */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		/* Restart the timeout whenever the head advances, so we only
		 * give up when the hardware has truly stalled. */
		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
	}

	return -EBUSY;
}
64
/* Resynchronize the driver's software ring state (head/tail/space) with
 * the hardware registers, e.g. after another agent may have touched the
 * ring behind the driver's back. */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	/* Free space wraps modulo the ring size; keep 8 bytes of slack. */
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	/* head == tail means the hardware has consumed everything. */
	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
79
/* Tear down DMA state: IRQs, the ring-buffer mapping, and the hardware
 * status page (both the PCI-allocated and the graphics-memory variant).
 * Safe to call on partially initialized state; always returns 0. */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
	}

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
		/* Need to rewrite hardware status page */
		I915_WRITE(0x02080, 0x1ffff000);
	}

	/* Status page living in graphics memory (see i915_set_status_page). */
	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
		I915_WRITE(0x2080, 0x1ffff000);
	}

	return 0;
}
112
/* DRM_I915_INIT (I915_INIT_DMA): wire up the SAREA, MMIO map, and ring
 * buffer described by 'init', and allocate a hardware status page on
 * chips that do not require one in graphics memory.
 * Cleans up after itself and returns a negative errno on any failure. */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		i915_dma_cleanup(dev);
		return -EINVAL;
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}

	/* The driver-private area lives inside the SAREA at an offset
	 * chosen by userspace. */
	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	/* Assumes ring_size is a power of two -- TODO confirm; not validated here. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;
	if (IS_I965G(dev)) /* 965 doesn't support older method */
		dev_priv->use_mi_batchbuffer_start = 1;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->status_page_dmah =
		    drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

		if (!dev_priv->status_page_dmah) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Can not allocate hardware status page\n");
			return -ENOMEM;
		}
		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
		/* 0x02080 is presumably the status-page address register
		 * (also written in cleanup/resume) -- verify against PRM. */
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	}
	DRM_DEBUG("Enabled hardware status page\n");
	return 0;
}
192
/* DRM_I915_INIT (I915_RESUME_DMA): re-point the hardware at the status
 * page after suspend/resume.  All mappings must still be in place from
 * the original I915_INIT_DMA; this only re-verifies and re-programs. */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __func__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Prefer the graphics-memory status page when one was configured. */
	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	else
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
230
231static int i915_dma_init(struct drm_device *dev, void *data,
232 struct drm_file *file_priv)
233{
234 drm_i915_init_t *init = data;
235 int retcode = 0;
236
237 switch (init->func) {
238 case I915_INIT_DMA:
239 retcode = i915_initialize(dev, init);
240 break;
241 case I915_CLEANUP_DMA:
242 retcode = i915_dma_cleanup(dev);
243 break;
244 case I915_RESUME_DMA:
245 retcode = i915_dma_resume(dev);
246 break;
247 default:
248 retcode = -EINVAL;
249 break;
250 }
251
252 return retcode;
253}
254
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Returns the length in dwords of a permitted command (so the caller
 * knows where the next command starts), or 0 for any disallowed or
 * unknown command -- the signal to abort scanning the buffer.
 */
static int do_validate_cmd(int cmd)
{
	int client = (cmd >> 29) & 0x7;

	if (client == 0x0) {
		int opcode = (cmd >> 23) & 0x3f;

		if (opcode == 0x0)
			return 1;	/* MI_NOOP */
		if (opcode == 0x4)
			return 1;	/* MI_FLUSH */
		return 0;		/* disallow everything else */
	}

	if (client == 0x2)
		return (cmd & 0xff) + 2;	/* 2d commands */

	if (client == 0x3) {
		int op3d = (cmd >> 24) & 0x1f;

		if (op3d <= 0x18)
			return 1;

		switch (op3d) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			if (cmd & (1 << 17)) {		/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;	/* indirect sequential */
		default:
			return 0;
		}
	}

	/* client 0x1 is reserved; 0x4..0x7 unknown -- all disallowed. */
	return 0;
}
321
/* Thin wrapper around do_validate_cmd(); kept as a single hook point
 * for per-command debug tracing. */
static int validate_cmd(int cmd)
{
	return do_validate_cmd(cmd);
}
330
/* Copy a user-supplied command stream into the ring, validating each
 * command header against the whitelist in validate_cmd().
 * 'buffer' points at userspace memory the caller has already
 * VERIFYAREA-checked; 'dwords' is its length in 32-bit words. */
static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* Reject buffers that could never fit in the ring at once. */
	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	/* Reserve an even number of dwords (qword alignment). */
	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		/* sz == 0 marks an illegal command; also guard against a
		 * command whose operands would run past the buffer end. */
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		/* Emit the command's remaining sz-1 operand dwords. */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);	/* pad to qword alignment */

	ADVANCE_LP_RING();

	return 0;
}
369
/* Emit a cliprect (drawing rectangle) command for box i of 'boxes'.
 * Copies the rect from userspace, sanity-checks it, and emits the
 * generation-appropriate GFX_OP_DRAWRECT_INFO packet. */
static int i915_emit_box(struct drm_device * dev,
			 struct drm_clip_rect __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	/* Reject empty or inverted rectangles. */
	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		/* 965 packet omits the DR1 word and the trailing pad. */
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);	/* pad to an even number of dwords */
		ADVANCE_LP_RING();
	}

	return 0;
}
408
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

/* Emit a sequence-number "breadcrumb": bump the counter, publish it to
 * userspace via the SAREA, and have the GPU store it into the hardware
 * status page when execution reaches this point. */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	/* Wrap before the sign bit so the counter stays positive. */
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

	/* Store the counter at status-page dword index 5 (byte offset 20);
	 * readers fetch it as hw_status[5]. */
	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
430
/* Execute a user command buffer once per cliprect (or once if there are
 * none), emitting the cliprect before each pass, then a breadcrumb. */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	/* Buffer length must be a whole number of dwords. */
	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
462
/* Start a user batch buffer, once per cliprect (or once if none).
 * Uses MI_BATCH_BUFFER_START where the chip supports it, otherwise the
 * legacy MI_BATCH_BUFFER packet with an explicit end address. */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	/* Start address and length must be qword aligned. */
	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				/* 965 carries the non-secure bit in the
				 * command dword, not in the address. */
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			/* Address of the last dword of the batch. */
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
513
514static int i915_dispatch_flip(struct drm_device * dev)
515{
516 drm_i915_private_t *dev_priv = dev->dev_private;
517 RING_LOCALS;
518
519 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
520 __FUNCTION__,
521 dev_priv->current_page,
522 dev_priv->sarea_priv->pf_current_page);
523
524 i915_kernel_lost_context(dev);
525
526 BEGIN_LP_RING(2);
527 OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
528 OUT_RING(0);
529 ADVANCE_LP_RING();
530
531 BEGIN_LP_RING(6);
532 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
533 OUT_RING(0);
534 if (dev_priv->current_page == 0) {
535 OUT_RING(dev_priv->back_offset);
536 dev_priv->current_page = 1;
537 } else {
538 OUT_RING(dev_priv->front_offset);
539 dev_priv->current_page = 0;
540 }
541 OUT_RING(0);
542 ADVANCE_LP_RING();
543
544 BEGIN_LP_RING(2);
545 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
546 OUT_RING(0);
547 ADVANCE_LP_RING();
548
549 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
550
551 BEGIN_LP_RING(4);
552 OUT_RING(CMD_STORE_DWORD_IDX);
553 OUT_RING(20);
554 OUT_RING(dev_priv->counter);
555 OUT_RING(0);
556 ADVANCE_LP_RING();
557
558 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
559 return 0;
560}
561
/* Drain the ring: wait until Size - 8 bytes are free, which is the
 * maximum free space the ring can report, i.e. the hardware is idle. */
static int i915_quiescent(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}
569
/* DRM_I915_FLUSH ioctl: wait for the hardware to go idle.
 * Caller must hold the hardware lock (enforced by LOCK_TEST_WITH_RETURN). */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_quiescent(dev);
}
577
/* DRM_I915_BATCHBUFFER ioctl: validate arguments and dispatch a
 * hardware batch buffer, then mirror the last-dispatched breadcrumb
 * from the status page into the SAREA. */
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Make sure the whole cliprect array is readable before use. */
	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	ret = i915_dispatch_batchbuffer(dev, batch);

	/* Status-page dword 5 holds the breadcrumb counter. */
	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}
608
/* DRM_I915_CMDBUFFER ioctl: validate and execute a command stream
 * copied directly into the ring (as opposed to a batch buffer), then
 * mirror the breadcrumb from the status page into the SAREA. */
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	int ret;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Make sure the whole cliprect array is readable before use. */
	if (cmdbuf->num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
				cmdbuf->num_cliprects *
				sizeof(struct drm_clip_rect))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return -EFAULT;
	}

	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	/* Status-page dword 5 holds the breadcrumb counter. */
	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}
641
/* DRM_I915_FLIP ioctl: schedule a page flip on the ring.
 * Caller must hold the hardware lock.
 *
 * Fix: use C99 __func__ instead of the GCC-specific __FUNCTION__, for
 * consistency with the rest of this file (see i915_dma_resume). */
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("%s\n", __func__);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_dispatch_flip(dev);
}
651
/* DRM_I915_GETPARAM ioctl: copy one driver parameter value out to the
 * user pointer in param->value.  Returns -EINVAL for unknown params. */
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
686
/* DRM_I915_SETPARAM ioctl: set one driver tunable (master/root only,
 * per the flags in the ioctl table). */
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		/* 965 only supports MI_BATCH_BUFFER_START (see
		 * i915_initialize), so the request is ignored there. */
		if (!IS_I965G(dev))
			dev_priv->use_mi_batchbuffer_start = param->value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	return 0;
}
716
/* DRM_I915_HWS_ADDR ioctl: place the hardware status page at a
 * userspace-chosen offset in graphics memory, for chips that need a
 * graphics-memory status page (I915_NEED_GFX_HWS, e.g. G33-class). */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);

	/* Keep only the page-aligned portion of the offset. */
	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	/* NOTE(review): clears PAGE_SIZE bytes but only 4KB were mapped
	 * above -- equivalent on 4K-page systems; verify elsewhere. */
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
		  dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}
758
759int i915_driver_load(struct drm_device *dev, unsigned long flags)
760{
761 struct drm_i915_private *dev_priv = dev->dev_private;
762 unsigned long base, size;
763 int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
764
765 /* i915 has 4 more counters */
766 dev->counters += 4;
767 dev->types[6] = _DRM_STAT_IRQ;
768 dev->types[7] = _DRM_STAT_PRIMARY;
769 dev->types[8] = _DRM_STAT_SECONDARY;
770 dev->types[9] = _DRM_STAT_DMA;
771
772 dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
773 if (dev_priv == NULL)
774 return -ENOMEM;
775
776 memset(dev_priv, 0, sizeof(drm_i915_private_t));
777
778 dev->dev_private = (void *)dev_priv;
779
780 /* Add register map (needed for suspend/resume) */
781 base = drm_get_resource_start(dev, mmio_bar);
782 size = drm_get_resource_len(dev, mmio_bar);
783
784 ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
785 _DRM_KERNEL | _DRM_DRIVER,
786 &dev_priv->mmio_map);
787 return ret;
788}
789
/* Driver unload: release the MMIO register map and free the private
 * structure allocated in i915_driver_load(). */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mmio_map)
		drm_rmmap(dev, dev_priv->mmio_map);

	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
		 DRM_MEM_DRIVER);

	return 0;
}
802
/* Last-close callback: tear down the AGP memory heap and all DMA state
 * so the device is clean for the next master. */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}
815
/* Per-file preclose: free any AGP-heap regions still owned by this
 * client.  NOTE(review): no !dev_priv guard here, unlike lastclose --
 * relies on i915_driver_load() always having allocated dev_private;
 * confirm that assumption holds for every close path. */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
821
/* Ioctl dispatch table; entry order must match the DRM_I915_* numbers.
 * Init/teardown and heap-management ioctls are restricted to the
 * authenticated master running as root. */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
};

/* Number of entries above; exported for the DRM core. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
843
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
new file mode 100644
index 000000000000..93aed1c38bd2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -0,0 +1,605 @@
1/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 */
3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drm.h"
33#include "i915_drv.h"
34
35#include "drm_pciids.h"
36
/* PCI device IDs claimed by this driver; the i915_PCI_IDS table is
 * generated in drm_pciids.h. */
static struct pci_device_id pciidlist[] = {
	i915_PCI_IDS
};
40
/* Display pipe selector for the palette save/restore helpers below. */
enum pipe {
	PIPE_A = 0,
	PIPE_B,
};
45
46static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
47{
48 struct drm_i915_private *dev_priv = dev->dev_private;
49
50 if (pipe == PIPE_A)
51 return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
52 else
53 return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
54}
55
56static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
57{
58 struct drm_i915_private *dev_priv = dev->dev_private;
59 unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
60 u32 *array;
61 int i;
62
63 if (!i915_pipe_enabled(dev, pipe))
64 return;
65
66 if (pipe == PIPE_A)
67 array = dev_priv->save_palette_a;
68 else
69 array = dev_priv->save_palette_b;
70
71 for(i = 0; i < 256; i++)
72 array[i] = I915_READ(reg + (i << 2));
73}
74
75static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
76{
77 struct drm_i915_private *dev_priv = dev->dev_private;
78 unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
79 u32 *array;
80 int i;
81
82 if (!i915_pipe_enabled(dev, pipe))
83 return;
84
85 if (pipe == PIPE_A)
86 array = dev_priv->save_palette_a;
87 else
88 array = dev_priv->save_palette_b;
89
90 for(i = 0; i < 256; i++)
91 I915_WRITE(reg + (i << 2), array[i]);
92}
93
/* Read one register from a VGA index/data port pair (CRTC, GR, SR...). */
static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
{
	outb(reg, index_port);
	return inb(data_port);
}
99
/* Read one VGA attribute controller register.  The inb(st01) first
 * resets the AR index/data flip-flop (standard VGA behaviour) so the
 * following outb is interpreted as an index write. */
static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
{
	inb(st01);
	outb(palette_enable | reg, VGA_AR_INDEX);
	return inb(VGA_AR_DATA_READ);
}
106
107static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
108{
109 inb(st01);
110 outb(palette_enable | reg, VGA_AR_INDEX);
111 outb(val, VGA_AR_DATA_WRITE);
112}
113
/* Write one register through a VGA index/data port pair. */
static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
{
	outb(reg, index_port);
	outb(val, data_port);
}
119
/* Save the legacy VGA register state (DAC palette, MSR, CRTC, attribute,
 * graphics and sequencer registers) into dev_priv for suspend.
 * Paired with i915_restore_vga(); the port-access ordering below matters
 * because several VGA ports share index/data flip-flop state. */
static void i915_save_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	u16 cr_index, cr_data, st01;

	/* VGA color palette registers */
	dev_priv->saveDACMASK = inb(VGA_DACMASK);
	/* DACCRX automatically increments during read */
	outb(0, VGA_DACRX);
	/* Read 3 bytes of color data from each index */
	for (i = 0; i < 256 * 3; i++)
		dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);

	/* MSR bits */
	dev_priv->saveMSR = inb(VGA_MSR_READ);
	/* The CGA/MDA mode bit selects which CRTC and status ports decode. */
	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
		cr_index = VGA_CR_INDEX_CGA;
		cr_data = VGA_CR_DATA_CGA;
		st01 = VGA_ST01_CGA;
	} else {
		cr_index = VGA_CR_INDEX_MDA;
		cr_data = VGA_CR_DATA_MDA;
		st01 = VGA_ST01_MDA;
	}

	/* CRT controller regs */
	/* Clear CR11 bit 7 first -- presumably the CR0-7 write-protect
	 * bit; verify against the VGA reference. */
	i915_write_indexed(cr_index, cr_data, 0x11,
			   i915_read_indexed(cr_index, cr_data, 0x11) &
			   (~0x80));
	for (i = 0; i <= 0x24; i++)
		dev_priv->saveCR[i] =
			i915_read_indexed(cr_index, cr_data, i);
	/* Make sure we don't turn off CR group 0 writes */
	dev_priv->saveCR[0x11] &= ~0x80;

	/* Attribute controller registers */
	inb(st01);	/* reset the AR index/data flip-flop */
	dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
	for (i = 0; i <= 0x14; i++)
		dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
	inb(st01);
	outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);	/* restore the AR index */
	inb(st01);

	/* Graphics controller registers */
	for (i = 0; i < 9; i++)
		dev_priv->saveGR[i] =
			i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);

	/* NOTE(review): 0x10/0x11/0x18 look like vendor-extended GR
	 * indices beyond the standard 0-8 -- confirm against the PRM. */
	dev_priv->saveGR[0x10] =
		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
	dev_priv->saveGR[0x11] =
		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
	dev_priv->saveGR[0x18] =
		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);

	/* Sequencer registers */
	for (i = 0; i < 8; i++)
		dev_priv->saveSR[i] =
			i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
}
182
/* Restore the VGA register state captured by i915_save_vga().
 * Ordering mirrors the save path: MSR first (it selects the CRTC/status
 * ports), then sequencer, CRTC, graphics, attribute controller, and
 * finally the DAC palette. */
static void i915_restore_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	u16 cr_index, cr_data, st01;

	/* MSR bits */
	outb(dev_priv->saveMSR, VGA_MSR_WRITE);
	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
		cr_index = VGA_CR_INDEX_CGA;
		cr_data = VGA_CR_DATA_CGA;
		st01 = VGA_ST01_CGA;
	} else {
		cr_index = VGA_CR_INDEX_MDA;
		cr_data = VGA_CR_DATA_MDA;
		st01 = VGA_ST01_MDA;
	}

	/* Sequencer registers, don't write SR07 */
	for (i = 0; i < 7; i++)
		i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
				   dev_priv->saveSR[i]);

	/* CRT controller regs */
	/* Enable CR group 0 writes */
	i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
	for (i = 0; i <= 0x24; i++)
		i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);

	/* Graphics controller regs */
	for (i = 0; i < 9; i++)
		i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
				   dev_priv->saveGR[i]);

	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
			   dev_priv->saveGR[0x10]);
	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
			   dev_priv->saveGR[0x11]);
	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
			   dev_priv->saveGR[0x18]);

	/* Attribute controller registers */
	inb(st01);	/* reset the AR index/data flip-flop */
	for (i = 0; i <= 0x14; i++)
		i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
	inb(st01); /* switch back to index mode */
	/* OR 0x20: presumably the palette-address-source bit that
	 * re-enables video output -- verify against the VGA reference. */
	outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
	inb(st01);

	/* VGA color palette registers */
	outb(dev_priv->saveDACMASK, VGA_DACMASK);
	/* DACWX automatically increments during write */
	outb(0, VGA_DACWX);
	/* Write back 3 bytes of color data for each index */
	for (i = 0; i < 256 * 3; i++)
		outb(dev_priv->saveDACDATA[i], VGA_DACDATA);

}
241
242static int i915_suspend(struct drm_device *dev, pm_message_t state)
243{
244 struct drm_i915_private *dev_priv = dev->dev_private;
245 int i;
246
247 if (!dev || !dev_priv) {
248 printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
249 printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
250 return -ENODEV;
251 }
252
253 if (state.event == PM_EVENT_PRETHAW)
254 return 0;
255
256 pci_save_state(dev->pdev);
257 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
258
259 /* Display arbitration control */
260 dev_priv->saveDSPARB = I915_READ(DSPARB);
261
262 /* Pipe & plane A info */
263 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
264 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
265 dev_priv->saveFPA0 = I915_READ(FPA0);
266 dev_priv->saveFPA1 = I915_READ(FPA1);
267 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
268 if (IS_I965G(dev))
269 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
270 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
271 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
272 dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
273 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
274 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
275 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
276 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
277
278 dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
279 dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
280 dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
281 dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
282 dev_priv->saveDSPABASE = I915_READ(DSPABASE);
283 if (IS_I965G(dev)) {
284 dev_priv->saveDSPASURF = I915_READ(DSPASURF);
285 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
286 }
287 i915_save_palette(dev, PIPE_A);
288 dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
289
290 /* Pipe & plane B info */
291 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
292 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
293 dev_priv->saveFPB0 = I915_READ(FPB0);
294 dev_priv->saveFPB1 = I915_READ(FPB1);
295 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
296 if (IS_I965G(dev))
297 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
298 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
299 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
300 dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
301 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
302 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
303 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
304 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
305
306 dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
307 dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
308 dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
309 dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
310 dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
311 if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
312 dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
313 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
314 }
315 i915_save_palette(dev, PIPE_B);
316 dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
317
318 /* CRT state */
319 dev_priv->saveADPA = I915_READ(ADPA);
320
321 /* LVDS state */
322 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
323 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
324 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
325 if (IS_I965G(dev))
326 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
327 if (IS_MOBILE(dev) && !IS_I830(dev))
328 dev_priv->saveLVDS = I915_READ(LVDS);
329 if (!IS_I830(dev) && !IS_845G(dev))
330 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
331 dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
332 dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
333 dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
334
335 /* FIXME: save TV & SDVO state */
336
337 /* FBC state */
338 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
339 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
340 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
341 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
342
343 /* Interrupt state */
344 dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
345 dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
346 dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
347
348 /* VGA state */
349 dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
350 dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
351 dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
352 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
353
354 /* Clock gating state */
355 dev_priv->saveD_STATE = I915_READ(D_STATE);
356 dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
357
358 /* Cache mode state */
359 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
360
361 /* Memory Arbitration state */
362 dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
363
364 /* Scratch space */
365 for (i = 0; i < 16; i++) {
366 dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
367 dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
368 }
369 for (i = 0; i < 3; i++)
370 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
371
372 i915_save_vga(dev);
373
374 if (state.event == PM_EVENT_SUSPEND) {
375 /* Shut down the device */
376 pci_disable_device(dev->pdev);
377 pci_set_power_state(dev->pdev, PCI_D3hot);
378 }
379
380 return 0;
381}
382
383static int i915_resume(struct drm_device *dev)
384{
385 struct drm_i915_private *dev_priv = dev->dev_private;
386 int i;
387
388 pci_set_power_state(dev->pdev, PCI_D0);
389 pci_restore_state(dev->pdev);
390 if (pci_enable_device(dev->pdev))
391 return -1;
392 pci_set_master(dev->pdev);
393
394 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
395
396 I915_WRITE(DSPARB, dev_priv->saveDSPARB);
397
398 /* Pipe & plane A info */
399 /* Prime the clock */
400 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
401 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
402 ~DPLL_VCO_ENABLE);
403 udelay(150);
404 }
405 I915_WRITE(FPA0, dev_priv->saveFPA0);
406 I915_WRITE(FPA1, dev_priv->saveFPA1);
407 /* Actually enable it */
408 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
409 udelay(150);
410 if (IS_I965G(dev))
411 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
412 udelay(150);
413
414 /* Restore mode */
415 I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
416 I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
417 I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
418 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
419 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
420 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
421 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
422
423 /* Restore plane info */
424 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
425 I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
426 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
427 I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
428 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
429 if (IS_I965G(dev)) {
430 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
431 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
432 }
433
434 I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
435
436 i915_restore_palette(dev, PIPE_A);
437 /* Enable the plane */
438 I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
439 I915_WRITE(DSPABASE, I915_READ(DSPABASE));
440
441 /* Pipe & plane B info */
442 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
443 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
444 ~DPLL_VCO_ENABLE);
445 udelay(150);
446 }
447 I915_WRITE(FPB0, dev_priv->saveFPB0);
448 I915_WRITE(FPB1, dev_priv->saveFPB1);
449 /* Actually enable it */
450 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
451 udelay(150);
452 if (IS_I965G(dev))
453 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
454 udelay(150);
455
456 /* Restore mode */
457 I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
458 I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
459 I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
460 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
461 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
462 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
463 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
464
465 /* Restore plane info */
466 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
467 I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
468 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
469 I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
470 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
471 if (IS_I965G(dev)) {
472 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
473 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
474 }
475
476 I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
477
478 i915_restore_palette(dev, PIPE_B);
479 /* Enable the plane */
480 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
481 I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
482
483 /* CRT state */
484 I915_WRITE(ADPA, dev_priv->saveADPA);
485
486 /* LVDS state */
487 if (IS_I965G(dev))
488 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
489 if (IS_MOBILE(dev) && !IS_I830(dev))
490 I915_WRITE(LVDS, dev_priv->saveLVDS);
491 if (!IS_I830(dev) && !IS_845G(dev))
492 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
493
494 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
495 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
496 I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
497 I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
498 I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
499 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
500
501 /* FIXME: restore TV & SDVO state */
502
503 /* FBC info */
504 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
505 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
506 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
507 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
508
509 /* VGA state */
510 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
511 I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
512 I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
513 I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
514 udelay(150);
515
516 /* Clock gating state */
517 I915_WRITE (D_STATE, dev_priv->saveD_STATE);
518 I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
519
520 /* Cache mode state */
521 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
522
523 /* Memory arbitration state */
524 I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
525
526 for (i = 0; i < 16; i++) {
527 I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
528 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
529 }
530 for (i = 0; i < 3; i++)
531 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
532
533 i915_restore_vga(dev);
534
535 return 0;
536}
537
/* DRM driver descriptor: wires the i915 entry points (load/unload,
 * suspend/resume, IRQ handling, vblank waits) and the file operations
 * into the DRM core. */
static struct drm_driver driver = {
	/* don't use mtrr's here, the Xserver or user space app should
	 * deal with them for intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
	    DRIVER_IRQ_VBL2,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.suspend = i915_suspend,
	.resume = i915_resume,
	.device_is_agp = i915_driver_device_is_agp,
	.vblank_wait = i915_driver_vblank_wait,
	.vblank_wait2 = i915_driver_vblank_wait2,
	.irq_preinstall = i915_driver_irq_preinstall,
	.irq_postinstall = i915_driver_irq_postinstall,
	.irq_uninstall = i915_driver_irq_uninstall,
	.irq_handler = i915_driver_irq_handler,
	/* Generic DRM-core helpers for buffer reclaim and map offsets */
	.reclaim_buffers = drm_core_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = i915_ioctls,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = drm_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
		 .compat_ioctl = i915_compat_ioctl,
#endif
	},

	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
588
/* Module entry point: fill in the ioctl count (not known at static
 * init time) and register the driver with the DRM core. */
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;
	return drm_init(&driver);
}
594
/* Module exit point: unregister the driver from the DRM core. */
static void __exit i915_exit(void)
{
	drm_exit(&driver);
}
599
/* Standard kernel module registration and metadata. */
module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
new file mode 100644
index 000000000000..d7326d92a237
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -0,0 +1,1142 @@
1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2 */
3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30#ifndef _I915_DRV_H_
31#define _I915_DRV_H_
32
33/* General customization:
34 */
35
36#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
37
38#define DRIVER_NAME "i915"
39#define DRIVER_DESC "Intel Graphics"
40#define DRIVER_DATE "20060119"
41
42/* Interface history:
43 *
44 * 1.1: Original.
45 * 1.2: Add Power Management
46 * 1.3: Add vblank support
47 * 1.4: Fix cmdbuffer path, add heap destroy
48 * 1.5: Add vblank pipe configuration
49 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
50 * - Support vertical blank on secondary display pipe
51 */
52#define DRIVER_MAJOR 1
53#define DRIVER_MINOR 6
54#define DRIVER_PATCHLEVEL 0
55
/* State for the hardware ring buffer used to feed commands to the GPU.
 * head/tail/space are maintained by the ring-emission macros
 * (BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING) and i915_wait_ring(). */
typedef struct _drm_i915_ring_buffer {
	int tail_mask;		/* Size - 1; used to wrap the tail offset */
	unsigned long Start;	/* graphics address of the ring */
	unsigned long End;
	unsigned long Size;	/* ring size in bytes */
	u8 *virtual_start;	/* CPU mapping of the ring */
	int head;		/* last head offset read from hardware */
	int tail;		/* next byte offset to write commands at */
	int space;		/* bytes known free between tail and head */
	drm_local_map_t map;
} drm_i915_ring_buffer_t;
67
/* Node in the doubly-linked free/used list managed by i915_mem.c for
 * carving allocations out of the AGP heap. */
struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
	int start;	/* offset of this block within the heap */
	int size;	/* block size in bytes */
	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
75
/* A pending buffer swap scheduled to occur on a vertical blank, queued
 * on dev_priv->vbl_swaps and processed from the vblank interrupt. */
typedef struct _drm_i915_vbl_swap {
	struct list_head head;		/* entry in dev_priv->vbl_swaps */
	drm_drawable_t drw_id;		/* drawable to swap */
	unsigned int pipe;		/* which display pipe's vblank */
	unsigned int sequence;		/* vblank count to swap at */
} drm_i915_vbl_swap_t;
82
/* Per-device private state for the i915 DRM driver: maps, ring buffer,
 * hardware status page, interrupt/vblank bookkeeping, the AGP memory
 * heap, and the large block of register values saved across
 * suspend/resume by i915_suspend()/i915_resume(). */
typedef struct drm_i915_private {
	drm_local_map_t *sarea;		/* shared area with userspace */
	drm_local_map_t *mmio_map;	/* register MMIO mapping (I915_READ/WRITE) */

	drm_i915_sarea_t *sarea_priv;
	drm_i915_ring_buffer_t ring;	/* main (low-priority) command ring */

	/* Hardware status page: either DMA-allocated or at a GFX address */
	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
	dma_addr_t dma_status_page;
	unsigned long counter;		/* breadcrumb sequence counter */
	unsigned int status_gfx_addr;
	drm_local_map_t hws_map;

	unsigned int cpp;		/* bytes per pixel */
	int back_offset;
	int front_offset;
	int current_page;		/* which buffer is scanned out (page flip) */
	int page_flipping;
	int use_mi_batchbuffer_start;	/* chip supports MI_BATCH_BUFFER_START */

	/* Interrupt handling */
	wait_queue_head_t irq_queue;
	atomic_t irq_received;
	atomic_t irq_emitted;

	int tex_lru_log_granularity;
	int allow_batchbuffer;
	struct mem_block *agp_heap;	/* head of the i915_mem.c heap list */
	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
	int vblank_pipe;

	/* Vblank-synchronized swap queue */
	spinlock_t swaps_lock;
	drm_i915_vbl_swap_t vbl_swaps;	/* list head of pending swaps */
	unsigned int swaps_pending;

	/* Register state (saved at suspend, written back at resume) */
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPABASE;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBBASE;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVCLK_DIVISOR_VGA0;
	u32 saveVCLK_DIVISOR_VGA1;
	u32 saveVCLK_POST_DIV;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 saveLVDSPP_ON;
	u32 saveLVDSPP_OFF;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_CYCLE;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveCACHE_MODE_0;
	u32 saveD_STATE;
	u32 saveDSPCLK_GATE_D;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	/* Legacy VGA state (see i915_save_vga/i915_restore_vga) */
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
	u8 saveCR[37];
} drm_i915_private_t;
207
208extern struct drm_ioctl_desc i915_ioctls[];
209extern int i915_max_ioctl;
210
211 /* i915_dma.c */
212extern void i915_kernel_lost_context(struct drm_device * dev);
213extern int i915_driver_load(struct drm_device *, unsigned long flags);
214extern int i915_driver_unload(struct drm_device *);
215extern void i915_driver_lastclose(struct drm_device * dev);
216extern void i915_driver_preclose(struct drm_device *dev,
217 struct drm_file *file_priv);
218extern int i915_driver_device_is_agp(struct drm_device * dev);
219extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
220 unsigned long arg);
221
222/* i915_irq.c */
223extern int i915_irq_emit(struct drm_device *dev, void *data,
224 struct drm_file *file_priv);
225extern int i915_irq_wait(struct drm_device *dev, void *data,
226 struct drm_file *file_priv);
227
228extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
229extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
230extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
231extern void i915_driver_irq_preinstall(struct drm_device * dev);
232extern void i915_driver_irq_postinstall(struct drm_device * dev);
233extern void i915_driver_irq_uninstall(struct drm_device * dev);
234extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
235 struct drm_file *file_priv);
236extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
237 struct drm_file *file_priv);
238extern int i915_vblank_swap(struct drm_device *dev, void *data,
239 struct drm_file *file_priv);
240
241/* i915_mem.c */
242extern int i915_mem_alloc(struct drm_device *dev, void *data,
243 struct drm_file *file_priv);
244extern int i915_mem_free(struct drm_device *dev, void *data,
245 struct drm_file *file_priv);
246extern int i915_mem_init_heap(struct drm_device *dev, void *data,
247 struct drm_file *file_priv);
248extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
249 struct drm_file *file_priv);
250extern void i915_mem_takedown(struct mem_block **heap);
251extern void i915_mem_release(struct drm_device * dev,
252 struct drm_file *file_priv, struct mem_block *heap);
253
254#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
255#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
256#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
257#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
258
259#define I915_VERBOSE 0
260
/* Locals used by the ring-emission macros below; must be declared in
 * any function that uses BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING. */
#define RING_LOCALS	unsigned int outring, ringmask, outcount; \
                        volatile char *virt;

/* Reserve space for n dwords in the low-priority ring, blocking in
 * i915_wait_ring() if the ring does not have room, then snapshot the
 * tail/mask/mapping into the RING_LOCALS. */
#define BEGIN_LP_RING(n) do {				\
	if (I915_VERBOSE)				\
		DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
	if (dev_priv->ring.space < (n)*4)		\
		i915_wait_ring(dev, (n)*4, __func__);	\
	outcount = 0;					\
	outring = dev_priv->ring.tail;			\
	ringmask = dev_priv->ring.tail_mask;		\
	virt = dev_priv->ring.virtual_start;		\
} while (0)

/* Emit one dword at the current tail and advance it, wrapping via the
 * ring mask. */
#define OUT_RING(n) do {					\
	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
	*(volatile unsigned int *)(virt + outring) = (n);	\
	outcount++;						\
	outring += 4;						\
	outring &= ringmask;					\
} while (0)

/* Publish the emitted dwords: update the software tail/space and write
 * the hardware ring-tail register so the GPU starts fetching. */
#define ADVANCE_LP_RING() do {						\
	if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);	\
	dev_priv->ring.tail = outring;					\
	dev_priv->ring.space -= outcount * 4;				\
	I915_WRITE(LP_RING + RING_TAIL, outring);			\
} while(0)
289
290extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
291
292/* Extended config space */
293#define LBB 0xf4
294
295/* VGA stuff */
296
297#define VGA_ST01_MDA 0x3ba
298#define VGA_ST01_CGA 0x3da
299
300#define VGA_MSR_WRITE 0x3c2
301#define VGA_MSR_READ 0x3cc
302#define VGA_MSR_MEM_EN (1<<1)
303#define VGA_MSR_CGA_MODE (1<<0)
304
305#define VGA_SR_INDEX 0x3c4
306#define VGA_SR_DATA 0x3c5
307
308#define VGA_AR_INDEX 0x3c0
309#define VGA_AR_VID_EN (1<<5)
310#define VGA_AR_DATA_WRITE 0x3c0
311#define VGA_AR_DATA_READ 0x3c1
312
313#define VGA_GR_INDEX 0x3ce
314#define VGA_GR_DATA 0x3cf
315/* GR05 */
316#define VGA_GR_MEM_READ_MODE_SHIFT 3
317#define VGA_GR_MEM_READ_MODE_PLANE 1
318/* GR06 */
319#define VGA_GR_MEM_MODE_MASK 0xc
320#define VGA_GR_MEM_MODE_SHIFT 2
321#define VGA_GR_MEM_A0000_AFFFF 0
322#define VGA_GR_MEM_A0000_BFFFF 1
323#define VGA_GR_MEM_B0000_B7FFF 2
324#define VGA_GR_MEM_B0000_BFFFF 3
325
326#define VGA_DACMASK 0x3c6
327#define VGA_DACRX 0x3c7
328#define VGA_DACWX 0x3c8
329#define VGA_DACDATA 0x3c9
330
331#define VGA_CR_INDEX_MDA 0x3b4
332#define VGA_CR_DATA_MDA 0x3b5
333#define VGA_CR_INDEX_CGA 0x3d4
334#define VGA_CR_DATA_CGA 0x3d5
335
336#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
337#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
338#define CMD_REPORT_HEAD (7<<23)
339#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
340#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
341
342#define INST_PARSER_CLIENT 0x00000000
343#define INST_OP_FLUSH 0x02000000
344#define INST_FLUSH_MAP_CACHE 0x00000001
345
346#define BB1_START_ADDR_MASK (~0x7)
347#define BB1_PROTECTED (1<<0)
348#define BB1_UNPROTECTED (0<<0)
349#define BB2_END_ADDR_MASK (~0x7)
350
351/* Framebuffer compression */
352#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
353#define FBC_LL_BASE 0x03204 /* 4k page aligned */
354#define FBC_CONTROL 0x03208
355#define FBC_CTL_EN (1<<31)
356#define FBC_CTL_PERIODIC (1<<30)
357#define FBC_CTL_INTERVAL_SHIFT (16)
358#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
359#define FBC_CTL_STRIDE_SHIFT (5)
360#define FBC_CTL_FENCENO (1<<0)
361#define FBC_COMMAND 0x0320c
362#define FBC_CMD_COMPRESS (1<<0)
363#define FBC_STATUS 0x03210
364#define FBC_STAT_COMPRESSING (1<<31)
365#define FBC_STAT_COMPRESSED (1<<30)
366#define FBC_STAT_MODIFIED (1<<29)
367#define FBC_STAT_CURRENT_LINE (1<<0)
368#define FBC_CONTROL2 0x03214
369#define FBC_CTL_FENCE_DBL (0<<4)
370#define FBC_CTL_IDLE_IMM (0<<2)
371#define FBC_CTL_IDLE_FULL (1<<2)
372#define FBC_CTL_IDLE_LINE (2<<2)
373#define FBC_CTL_IDLE_DEBUG (3<<2)
374#define FBC_CTL_CPU_FENCE (1<<1)
375#define FBC_CTL_PLANEA (0<<0)
376#define FBC_CTL_PLANEB (1<<0)
377#define FBC_FENCE_OFF 0x0321b
378
379#define FBC_LL_SIZE (1536)
380#define FBC_LL_PAD (32)
381
382/* Interrupt bits:
383 */
384#define USER_INT_FLAG (1<<1)
385#define VSYNC_PIPEB_FLAG (1<<5)
386#define VSYNC_PIPEA_FLAG (1<<7)
387#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
388
389#define I915REG_HWSTAM 0x02098
390#define I915REG_INT_IDENTITY_R 0x020a4
391#define I915REG_INT_MASK_R 0x020a8
392#define I915REG_INT_ENABLE_R 0x020a0
393
394#define I915REG_PIPEASTAT 0x70024
395#define I915REG_PIPEBSTAT 0x71024
396
397#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
398#define I915_VBLANK_CLEAR (1UL<<1)
399
400#define SRX_INDEX 0x3c4
401#define SRX_DATA 0x3c5
402#define SR01 1
403#define SR01_SCREEN_OFF (1<<5)
404
405#define PPCR 0x61204
406#define PPCR_ON (1<<0)
407
408#define DVOB 0x61140
409#define DVOB_ON (1<<31)
410#define DVOC 0x61160
411#define DVOC_ON (1<<31)
412#define LVDS 0x61180
413#define LVDS_ON (1<<31)
414
415#define ADPA 0x61100
416#define ADPA_DPMS_MASK (~(3<<10))
417#define ADPA_DPMS_ON (0<<10)
418#define ADPA_DPMS_SUSPEND (1<<10)
419#define ADPA_DPMS_STANDBY (2<<10)
420#define ADPA_DPMS_OFF (3<<10)
421
422#define NOPID 0x2094
423#define LP_RING 0x2030
424#define HP_RING 0x2040
425/* The binner has its own ring buffer:
426 */
427#define HWB_RING 0x2400
428
429#define RING_TAIL 0x00
430#define TAIL_ADDR 0x001FFFF8
431#define RING_HEAD 0x04
432#define HEAD_WRAP_COUNT 0xFFE00000
433#define HEAD_WRAP_ONE 0x00200000
434#define HEAD_ADDR 0x001FFFFC
435#define RING_START 0x08
/* Ring start address field in RING_START (4K-aligned); the original
 * literal "0x0xFFFFF000" was malformed and would not compile if the
 * macro were ever expanded. */
#define START_ADDR       0xFFFFF000
437#define RING_LEN 0x0C
438#define RING_NR_PAGES 0x001FF000
439#define RING_REPORT_MASK 0x00000006
440#define RING_REPORT_64K 0x00000002
441#define RING_REPORT_128K 0x00000004
442#define RING_NO_REPORT 0x00000000
443#define RING_VALID_MASK 0x00000001
444#define RING_VALID 0x00000001
445#define RING_INVALID 0x00000000
446
447/* Instruction parser error reg:
448 */
449#define IPEIR 0x2088
450
451/* Scratch pad debug 0 reg:
452 */
453#define SCPD0 0x209c
454
455/* Error status reg:
456 */
457#define ESR 0x20b8
458
459/* Secondary DMA fetch address debug reg:
460 */
461#define DMA_FADD_S 0x20d4
462
463/* Memory Interface Arbitration State
464 */
465#define MI_ARB_STATE 0x20e4
466
467/* Cache mode 0 reg.
468 * - Manipulating render cache behaviour is central
469 * to the concept of zone rendering, tuning this reg can help avoid
470 * unnecessary render cache reads and even writes (for z/stencil)
471 * at beginning and end of scene.
472 *
473 * - To change a bit, write to this reg with a mask bit set and the
474 * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
475 */
476#define Cache_Mode_0 0x2120
477#define CACHE_MODE_0 0x2120
478#define CM0_MASK_SHIFT 16
479#define CM0_IZ_OPT_DISABLE (1<<6)
480#define CM0_ZR_OPT_DISABLE (1<<5)
481#define CM0_DEPTH_EVICT_DISABLE (1<<4)
482#define CM0_COLOR_EVICT_DISABLE (1<<3)
483#define CM0_DEPTH_WRITE_DISABLE (1<<1)
484#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
485
486
487/* Graphics flush control. A CPU write flushes the GWB of all writes.
488 * The data is discarded.
489 */
490#define GFX_FLSH_CNTL 0x2170
491
492/* Binner control. Defines the location of the bin pointer list:
493 */
494#define BINCTL 0x2420
495#define BC_MASK (1 << 9)
496
497/* Binned scene info.
498 */
499#define BINSCENE 0x2428
500#define BS_OP_LOAD (1 << 8)
501#define BS_MASK (1 << 22)
502
503/* Bin command parser debug reg:
504 */
505#define BCPD 0x2480
506
507/* Bin memory control debug reg:
508 */
509#define BMCD 0x2484
510
511/* Bin data cache debug reg:
512 */
513#define BDCD 0x2488
514
515/* Binner pointer cache debug reg:
516 */
517#define BPCD 0x248c
518
519/* Binner scratch pad debug reg:
520 */
521#define BINSKPD 0x24f0
522
523/* HWB scratch pad debug reg:
524 */
525#define HWBSKPD 0x24f4
526
527/* Binner memory pool reg:
528 */
529#define BMP_BUFFER 0x2430
530#define BMP_PAGE_SIZE_4K (0 << 10)
531#define BMP_BUFFER_SIZE_SHIFT 1
532#define BMP_ENABLE (1 << 0)
533
534/* Get/put memory from the binner memory pool:
535 */
536#define BMP_GET 0x2438
537#define BMP_PUT 0x2440
538#define BMP_OFFSET_SHIFT 5
539
540/* 3D state packets:
541 */
542#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
543
544#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
545#define SC_UPDATE_SCISSOR (0x1<<1)
546#define SC_ENABLE_MASK (0x1<<0)
547#define SC_ENABLE (0x1<<0)
548
549#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
550
551#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
552#define SCI_YMIN_MASK (0xffff<<16)
553#define SCI_XMIN_MASK (0xffff<<0)
554#define SCI_YMAX_MASK (0xffff<<16)
555#define SCI_XMAX_MASK (0xffff<<0)
556
557#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
558#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
559#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
560#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
561#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
562#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
563#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
564
565#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
566
567#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
568#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
569#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
570#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
571#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
572#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
573
574#define MI_BATCH_BUFFER ((0x30<<23)|1)
575#define MI_BATCH_BUFFER_START (0x31<<23)
576#define MI_BATCH_BUFFER_END (0xA<<23)
577#define MI_BATCH_NON_SECURE (1)
578#define MI_BATCH_NON_SECURE_I965 (1<<8)
579
580#define MI_WAIT_FOR_EVENT ((0x3<<23))
581#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
582#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
583#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
584
585#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
586
587#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
588#define ASYNC_FLIP (1<<22)
589#define DISPLAY_PLANE_A (0<<20)
590#define DISPLAY_PLANE_B (1<<20)
591
592/* Display regs */
593#define DSPACNTR 0x70180
594#define DSPBCNTR 0x71180
595#define DISPPLANE_SEL_PIPE_MASK (1<<24)
596
597/* Define the region of interest for the binner:
598 */
599#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
600
601#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
602
603#define CMD_MI_FLUSH (0x04 << 23)
604#define MI_NO_WRITE_FLUSH (1 << 2)
605#define MI_READ_FLUSH (1 << 0)
606#define MI_EXE_FLUSH (1 << 1)
607#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
608#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
609
610#define BREADCRUMB_BITS 31
611#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
612
613#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
614#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
615
616#define BLC_PWM_CTL 0x61254
617#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
618
619#define BLC_PWM_CTL2 0x61250
620/**
621 * This is the most significant 15 bits of the number of backlight cycles in a
622 * complete cycle of the modulated backlight control.
623 *
624 * The actual value is this field multiplied by two.
625 */
626#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
627#define BLM_LEGACY_MODE (1 << 16)
628/**
629 * This is the number of cycles out of the backlight modulation cycle for which
630 * the backlight is on.
631 *
632 * This field must be no greater than the number of cycles in the complete
633 * backlight modulation cycle.
634 */
635#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
636#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
637
638#define I915_GCFGC 0xf0
639#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
640#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
641#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
642#define I915_DISPLAY_CLOCK_MASK (7 << 4)
643
644#define I855_HPLLCC 0xc0
645#define I855_CLOCK_CONTROL_MASK (3 << 0)
646#define I855_CLOCK_133_200 (0 << 0)
647#define I855_CLOCK_100_200 (1 << 0)
648#define I855_CLOCK_100_133 (2 << 0)
649#define I855_CLOCK_166_250 (3 << 0)
650
651/* p317, 319
652 */
653#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
654#define VCLK2_VCO_N 0x600a
655#define VCLK2_VCO_DIV_SEL 0x6012
656
657#define VCLK_DIVISOR_VGA0 0x6000
658#define VCLK_DIVISOR_VGA1 0x6004
659#define VCLK_POST_DIV 0x6010
660/** Selects a post divisor of 4 instead of 2. */
661# define VGA1_PD_P2_DIV_4 (1 << 15)
662/** Overrides the p2 post divisor field */
663# define VGA1_PD_P1_DIV_2 (1 << 13)
664# define VGA1_PD_P1_SHIFT 8
665/** P1 value is 2 greater than this field */
666# define VGA1_PD_P1_MASK (0x1f << 8)
667/** Selects a post divisor of 4 instead of 2. */
668# define VGA0_PD_P2_DIV_4 (1 << 7)
669/** Overrides the p2 post divisor field */
670# define VGA0_PD_P1_DIV_2 (1 << 5)
671# define VGA0_PD_P1_SHIFT 0
672/** P1 value is 2 greater than this field */
673# define VGA0_PD_P1_MASK (0x1f << 0)
674
675/* PCI D state control register */
676#define D_STATE 0x6104
677#define DSPCLK_GATE_D 0x6200
678
679/* I830 CRTC registers */
680#define HTOTAL_A 0x60000
681#define HBLANK_A 0x60004
682#define HSYNC_A 0x60008
683#define VTOTAL_A 0x6000c
684#define VBLANK_A 0x60010
685#define VSYNC_A 0x60014
686#define PIPEASRC 0x6001c
687#define BCLRPAT_A 0x60020
688#define VSYNCSHIFT_A 0x60028
689
690#define HTOTAL_B 0x61000
691#define HBLANK_B 0x61004
692#define HSYNC_B 0x61008
693#define VTOTAL_B 0x6100c
694#define VBLANK_B 0x61010
695#define VSYNC_B 0x61014
696#define PIPEBSRC 0x6101c
697#define BCLRPAT_B 0x61020
698#define VSYNCSHIFT_B 0x61028
699
700#define PP_STATUS 0x61200
701# define PP_ON (1 << 31)
702/**
703 * Indicates that all dependencies of the panel are on:
704 *
705 * - PLL enabled
706 * - pipe enabled
707 * - LVDS/DVOB/DVOC on
708 */
709# define PP_READY (1 << 30)
710# define PP_SEQUENCE_NONE (0 << 28)
711# define PP_SEQUENCE_ON (1 << 28)
712# define PP_SEQUENCE_OFF (2 << 28)
713# define PP_SEQUENCE_MASK 0x30000000
714#define PP_CONTROL 0x61204
715# define POWER_TARGET_ON (1 << 0)
716
717#define LVDSPP_ON 0x61208
718#define LVDSPP_OFF 0x6120c
719#define PP_CYCLE 0x61210
720
721#define PFIT_CONTROL 0x61230
722# define PFIT_ENABLE (1 << 31)
723# define PFIT_PIPE_MASK (3 << 29)
724# define PFIT_PIPE_SHIFT 29
725# define VERT_INTERP_DISABLE (0 << 10)
726# define VERT_INTERP_BILINEAR (1 << 10)
727# define VERT_INTERP_MASK (3 << 10)
728# define VERT_AUTO_SCALE (1 << 9)
729# define HORIZ_INTERP_DISABLE (0 << 6)
730# define HORIZ_INTERP_BILINEAR (1 << 6)
731# define HORIZ_INTERP_MASK (3 << 6)
732# define HORIZ_AUTO_SCALE (1 << 5)
733# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
734
735#define PFIT_PGM_RATIOS 0x61234
736# define PFIT_VERT_SCALE_MASK 0xfff00000
737# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
738
739#define PFIT_AUTO_RATIOS 0x61238
740
741
742#define DPLL_A 0x06014
743#define DPLL_B 0x06018
744# define DPLL_VCO_ENABLE (1 << 31)
745# define DPLL_DVO_HIGH_SPEED (1 << 30)
746# define DPLL_SYNCLOCK_ENABLE (1 << 29)
747# define DPLL_VGA_MODE_DIS (1 << 28)
748# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
749# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
750# define DPLL_MODE_MASK (3 << 26)
751# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
752# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
753# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
754# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
755# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
756# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
757/**
758 * The i830 generation, in DAC/serial mode, defines p1 as two plus this
759 * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
760 */
761# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
762/**
763 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
764 * this field (only one bit may be set).
765 */
766# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
767# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
768# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
769# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
770# define PLL_REF_INPUT_DREFCLK (0 << 13)
771# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
772# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
773# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
774# define PLL_REF_INPUT_MASK (3 << 13)
775# define PLL_LOAD_PULSE_PHASE_SHIFT 9
776/*
777 * Parallel to Serial Load Pulse phase selection.
778 * Selects the phase for the 10X DPLL clock for the PCIe
779 * digital display port. The range is 4 to 13; 10 or more
780 * is just a flip delay. The default is 6
781 */
782# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
783# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
784
785/**
786 * SDVO multiplier for 945G/GM. Not used on 965.
787 *
788 * \sa DPLL_MD_UDI_MULTIPLIER_MASK
789 */
790# define SDVO_MULTIPLIER_MASK 0x000000ff
791# define SDVO_MULTIPLIER_SHIFT_HIRES 4
792# define SDVO_MULTIPLIER_SHIFT_VGA 0
793
794/** @defgroup DPLL_MD
795 * @{
796 */
797/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
798#define DPLL_A_MD 0x0601c
799/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
800#define DPLL_B_MD 0x06020
801/**
802 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
803 *
804 * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
805 */
806# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
807# define DPLL_MD_UDI_DIVIDER_SHIFT 24
808/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
809# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
810# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
811/**
812 * SDVO/UDI pixel multiplier.
813 *
814 * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
815 * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
816 * modes, the bus rate would be below the limits, so SDVO allows for stuffing
817 * dummy bytes in the datastream at an increased clock rate, with both sides of
818 * the link knowing how many bytes are fill.
819 *
820 * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
821 * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
822 * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
823 * through an SDVO command.
824 *
825 * This register field has values of multiplication factor minus 1, with
826 * a maximum multiplier of 5 for SDVO.
827 */
828# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
829# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
830/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
831 * This best be set to the default value (3) or the CRT won't work. No,
832 * I don't entirely understand what this does...
833 */
834# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
835# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
836/** @} */
837
838#define DPLL_TEST 0x606c
839# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
840# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
841# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
842# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
843# define DPLLB_TEST_N_BYPASS (1 << 19)
844# define DPLLB_TEST_M_BYPASS (1 << 18)
845# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
846# define DPLLA_TEST_N_BYPASS (1 << 3)
847# define DPLLA_TEST_M_BYPASS (1 << 2)
848# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
849
850#define ADPA 0x61100
851#define ADPA_DAC_ENABLE (1<<31)
852#define ADPA_DAC_DISABLE 0
853#define ADPA_PIPE_SELECT_MASK (1<<30)
854#define ADPA_PIPE_A_SELECT 0
855#define ADPA_PIPE_B_SELECT (1<<30)
856#define ADPA_USE_VGA_HVPOLARITY (1<<15)
857#define ADPA_SETS_HVPOLARITY 0
858#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
859#define ADPA_VSYNC_CNTL_ENABLE 0
860#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
861#define ADPA_HSYNC_CNTL_ENABLE 0
862#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
863#define ADPA_VSYNC_ACTIVE_LOW 0
864#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
865#define ADPA_HSYNC_ACTIVE_LOW 0
866
867#define FPA0 0x06040
868#define FPA1 0x06044
869#define FPB0 0x06048
870#define FPB1 0x0604c
871# define FP_N_DIV_MASK 0x003f0000
872# define FP_N_DIV_SHIFT 16
873# define FP_M1_DIV_MASK 0x00003f00
874# define FP_M1_DIV_SHIFT 8
875# define FP_M2_DIV_MASK 0x0000003f
876# define FP_M2_DIV_SHIFT 0
877
878
879#define PORT_HOTPLUG_EN 0x61110
880# define SDVOB_HOTPLUG_INT_EN (1 << 26)
881# define SDVOC_HOTPLUG_INT_EN (1 << 25)
882# define TV_HOTPLUG_INT_EN (1 << 18)
883# define CRT_HOTPLUG_INT_EN (1 << 9)
884# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
885
886#define PORT_HOTPLUG_STAT 0x61114
887# define CRT_HOTPLUG_INT_STATUS (1 << 11)
888# define TV_HOTPLUG_INT_STATUS (1 << 10)
889# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
890# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
891# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
892# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
893# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
894# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
895
896#define SDVOB 0x61140
897#define SDVOC 0x61160
898#define SDVO_ENABLE (1 << 31)
899#define SDVO_PIPE_B_SELECT (1 << 30)
900#define SDVO_STALL_SELECT (1 << 29)
901#define SDVO_INTERRUPT_ENABLE (1 << 26)
902/**
903 * 915G/GM SDVO pixel multiplier.
904 *
905 * Programmed value is multiplier - 1, up to 5x.
906 *
907 * \sa DPLL_MD_UDI_MULTIPLIER_MASK
908 */
909#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
910#define SDVO_PORT_MULTIPLY_SHIFT 23
911#define SDVO_PHASE_SELECT_MASK (15 << 19)
912#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
913#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
914#define SDVOC_GANG_MODE (1 << 16)
915#define SDVO_BORDER_ENABLE (1 << 7)
916#define SDVOB_PCIE_CONCURRENCY (1 << 3)
917#define SDVO_DETECTED (1 << 2)
918/* Bits to be preserved when writing */
919#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
920#define SDVOC_PRESERVE_MASK (1 << 17)
921
922/** @defgroup LVDS
923 * @{
924 */
925/**
926 * This register controls the LVDS output enable, pipe selection, and data
927 * format selection.
928 *
929 * All of the clock/data pairs are force powered down by power sequencing.
930 */
931#define LVDS 0x61180
932/**
933 * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
934 * the DPLL semantics change when the LVDS is assigned to that pipe.
935 */
936# define LVDS_PORT_EN (1 << 31)
937/** Selects pipe B for LVDS data. Must be set on pre-965. */
938# define LVDS_PIPEB_SELECT (1 << 30)
939
940/**
941 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
942 * pixel.
943 */
944# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
945# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
946# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
947/**
948 * Controls the A3 data pair, which contains the additional LSBs for 24 bit
949 * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
950 * on.
951 */
952# define LVDS_A3_POWER_MASK (3 << 6)
953# define LVDS_A3_POWER_DOWN (0 << 6)
954# define LVDS_A3_POWER_UP (3 << 6)
955/**
956 * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
957 * is set.
958 */
959# define LVDS_CLKB_POWER_MASK (3 << 4)
960# define LVDS_CLKB_POWER_DOWN (0 << 4)
961# define LVDS_CLKB_POWER_UP (3 << 4)
962
963/**
964 * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
965 * setting for whether we are in dual-channel mode. The B3 pair will
966 * additionally only be powered up when LVDS_A3_POWER_UP is set.
967 */
968# define LVDS_B0B3_POWER_MASK (3 << 2)
969# define LVDS_B0B3_POWER_DOWN (0 << 2)
970# define LVDS_B0B3_POWER_UP (3 << 2)
971
972#define PIPEACONF 0x70008
973#define PIPEACONF_ENABLE (1<<31)
974#define PIPEACONF_DISABLE 0
975#define PIPEACONF_DOUBLE_WIDE (1<<30)
976#define I965_PIPECONF_ACTIVE (1<<30)
977#define PIPEACONF_SINGLE_WIDE 0
978#define PIPEACONF_PIPE_UNLOCKED 0
979#define PIPEACONF_PIPE_LOCKED (1<<25)
980#define PIPEACONF_PALETTE 0
981#define PIPEACONF_GAMMA (1<<24)
982#define PIPECONF_FORCE_BORDER (1<<25)
983#define PIPECONF_PROGRESSIVE (0 << 21)
984#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
985#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
986
987#define DSPARB 0x70030
988#define DSPARB_CSTART_MASK (0x7f << 7)
989#define DSPARB_CSTART_SHIFT 7
990#define DSPARB_BSTART_MASK (0x7f)
991#define DSPARB_BSTART_SHIFT 0
992
/* Pipe B configuration register and its control bits. */
#define PIPEBCONF 0x71008
#define PIPEBCONF_ENABLE (1<<31)
#define PIPEBCONF_DISABLE 0	/* was accidentally defined twice; duplicate removed */
#define PIPEBCONF_DOUBLE_WIDE (1<<30)
#define PIPEBCONF_GAMMA (1<<24)
#define PIPEBCONF_PALETTE 0
1000
1001#define PIPEBGCMAXRED 0x71010
1002#define PIPEBGCMAXGREEN 0x71014
1003#define PIPEBGCMAXBLUE 0x71018
1004#define PIPEBSTAT 0x71024
1005#define PIPEBFRAMEHIGH 0x71040
1006#define PIPEBFRAMEPIXEL 0x71044
1007
1008#define DSPACNTR 0x70180
1009#define DSPBCNTR 0x71180
1010#define DISPLAY_PLANE_ENABLE (1<<31)
1011#define DISPLAY_PLANE_DISABLE 0
1012#define DISPPLANE_GAMMA_ENABLE (1<<30)
1013#define DISPPLANE_GAMMA_DISABLE 0
1014#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
1015#define DISPPLANE_8BPP (0x2<<26)
1016#define DISPPLANE_15_16BPP (0x4<<26)
1017#define DISPPLANE_16BPP (0x5<<26)
1018#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
1019#define DISPPLANE_32BPP (0x7<<26)
1020#define DISPPLANE_STEREO_ENABLE (1<<25)
1021#define DISPPLANE_STEREO_DISABLE 0
1022#define DISPPLANE_SEL_PIPE_MASK (1<<24)
1023#define DISPPLANE_SEL_PIPE_A 0
1024#define DISPPLANE_SEL_PIPE_B (1<<24)
1025#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
1026#define DISPPLANE_SRC_KEY_DISABLE 0
1027#define DISPPLANE_LINE_DOUBLE (1<<20)
1028#define DISPPLANE_NO_LINE_DOUBLE 0
1029#define DISPPLANE_STEREO_POLARITY_FIRST 0
1030#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
1031/* plane B only */
1032#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
1033#define DISPPLANE_ALPHA_TRANS_DISABLE 0
1034#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
1035#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
1036
1037#define DSPABASE 0x70184
1038#define DSPASTRIDE 0x70188
1039
1040#define DSPBBASE 0x71184
1041#define DSPBADDR DSPBBASE
1042#define DSPBSTRIDE 0x71188
1043
1044#define DSPAKEYVAL 0x70194
1045#define DSPAKEYMASK 0x70198
1046
1047#define DSPAPOS 0x7018C /* reserved */
1048#define DSPASIZE 0x70190
1049#define DSPBPOS 0x7118C
1050#define DSPBSIZE 0x71190
1051
1052#define DSPASURF 0x7019C
1053#define DSPATILEOFF 0x701A4
1054
1055#define DSPBSURF 0x7119C
1056#define DSPBTILEOFF 0x711A4
1057
1058#define VGACNTRL 0x71400
1059# define VGA_DISP_DISABLE (1 << 31)
1060# define VGA_2X_MODE (1 << 30)
1061# define VGA_PIPE_B_SELECT (1 << 29)
1062
1063/*
1064 * Some BIOS scratch area registers. The 845 (and 830?) store the amount
1065 * of video memory available to the BIOS in SWF1.
1066 */
1067
1068#define SWF0 0x71410
1069
1070/*
1071 * 855 scratch registers.
1072 */
1073#define SWF10 0x70410
1074
1075#define SWF30 0x72414
1076
1077/*
1078 * Overlay registers. These are overlay registers accessed via MMIO.
1079 * Those loaded via the overlay register page are defined in i830_video.c.
1080 */
1081#define OVADD 0x30000
1082
1083#define DOVSTA 0x30008
1084#define OC_BUF (0x3<<20)
1085
1086#define OGAMC5 0x30010
1087#define OGAMC4 0x30014
1088#define OGAMC3 0x30018
1089#define OGAMC2 0x3001c
1090#define OGAMC1 0x30020
1091#define OGAMC0 0x30024
1092/*
1093 * Palette registers
1094 */
1095#define PALETTE_A 0x0a000
1096#define PALETTE_B 0x0a800
1097
1098#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1099#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1100#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
1101#define IS_I855(dev) ((dev)->pci_device == 0x3582)
1102#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1103
1104#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
1105#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1106#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
1107#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
1108 (dev)->pci_device == 0x27AE)
1109#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
1110 (dev)->pci_device == 0x2982 || \
1111 (dev)->pci_device == 0x2992 || \
1112 (dev)->pci_device == 0x29A2 || \
1113 (dev)->pci_device == 0x2A02 || \
1114 (dev)->pci_device == 0x2A12 || \
1115 (dev)->pci_device == 0x2A42 || \
1116 (dev)->pci_device == 0x2E02 || \
1117 (dev)->pci_device == 0x2E12 || \
1118 (dev)->pci_device == 0x2E22)
1119
1120#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
1121
1122#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42)
1123
1124#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
1125 (dev)->pci_device == 0x2E12 || \
1126 (dev)->pci_device == 0x2E22)
1127
1128#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
1129 (dev)->pci_device == 0x29B2 || \
1130 (dev)->pci_device == 0x29D2)
1131
1132#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
1133 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
1134
1135#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
1136 IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
1137
1138#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev) || IS_G4X(dev))
1139
1140#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1141
1142#endif
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
new file mode 100644
index 000000000000..1fe68a251b75
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -0,0 +1,222 @@
1/**
2 * \file i915_ioc32.c
3 *
4 * 32-bit ioctl compatibility routines for the i915 DRM.
5 *
6 * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
7 *
8 *
9 * Copyright (C) Paul Mackerras 2005
10 * Copyright (C) Alan Hourihane 2005
11 * All Rights Reserved.
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
19 *
20 * The above copyright notice and this permission notice (including the next
21 * paragraph) shall be included in all copies or substantial portions of the
22 * Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
28 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32#include <linux/compat.h>
33
34#include "drmP.h"
35#include "drm.h"
36#include "i915_drm.h"
37
/*
 * 32-bit layout of drm_i915_batchbuffer_t.  Pointer-sized members are
 * carried as u32 so the struct matches what a 32-bit process passes in;
 * compat_i915_batchbuffer() widens them into the native struct.
 */
typedef struct _drm_i915_batchbuffer32 {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	u32 cliprects;		/* 32-bit user pointer to cliprects */
} drm_i915_batchbuffer32_t;
46
47static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
48 unsigned long arg)
49{
50 drm_i915_batchbuffer32_t batchbuffer32;
51 drm_i915_batchbuffer_t __user *batchbuffer;
52
53 if (copy_from_user
54 (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
55 return -EFAULT;
56
57 batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
58 if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
59 || __put_user(batchbuffer32.start, &batchbuffer->start)
60 || __put_user(batchbuffer32.used, &batchbuffer->used)
61 || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
62 || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
63 || __put_user(batchbuffer32.num_cliprects,
64 &batchbuffer->num_cliprects)
65 || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
66 &batchbuffer->cliprects))
67 return -EFAULT;
68
69 return drm_ioctl(file->f_path.dentry->d_inode, file,
70 DRM_IOCTL_I915_BATCHBUFFER,
71 (unsigned long)batchbuffer);
72}
73
/*
 * 32-bit layout of drm_i915_cmdbuffer_t.  Both 'buf' and 'cliprects'
 * are user pointers that a 32-bit process passes as u32 values;
 * compat_i915_cmdbuffer() widens them into the native struct.
 */
typedef struct _drm_i915_cmdbuffer32 {
	u32 buf;		/* 32-bit user pointer to command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	u32 cliprects;		/* 32-bit user pointer to cliprects */
} drm_i915_cmdbuffer32_t;
82
/*
 * 32-bit compat handler for DRM_I915_CMDBUFFER: translate the 32-bit
 * ioctl argument into a native drm_i915_cmdbuffer_t (widening the 'buf'
 * and 'cliprects' user pointers) and forward it through the regular
 * ioctl path.
 *
 * Returns 0 on success or -EFAULT if a userspace access fails.
 */
static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	drm_i915_cmdbuffer32_t cmdbuffer32;
	drm_i915_cmdbuffer_t __user *cmdbuffer;

	if (copy_from_user
	    (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
		return -EFAULT;

	/* Stage the native-sized request in the compat userspace area. */
	cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
	if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
	    || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
			  &cmdbuffer->buf)
	    || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
	    || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
	    || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
	    || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
	    || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
			  &cmdbuffer->cliprects))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer);
}
108
/*
 * 32-bit layout of drm_i915_irq_emit_t: the native struct holds a user
 * pointer (irq_seq), which a 32-bit process passes as a u32.
 */
typedef struct drm_i915_irq_emit32 {
	u32 irq_seq;		/* 32-bit user pointer to the sequence slot */
} drm_i915_irq_emit32_t;
112
/*
 * 32-bit compat handler for DRM_I915_IRQ_EMIT: widen the 32-bit
 * 'irq_seq' user pointer into the native drm_i915_irq_emit_t and
 * forward the request through the regular ioctl path.
 *
 * Returns 0 on success or -EFAULT if a userspace access fails.
 */
static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_i915_irq_emit32_t req32;
	drm_i915_irq_emit_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	/* Stage the native-sized request in the compat userspace area. */
	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
			  &request->irq_seq))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
}
/*
 * 32-bit layout of drm_i915_getparam_t; 'value' is a 32-bit user
 * pointer to the int that receives the queried parameter.
 */
typedef struct drm_i915_getparam32 {
	int param;		/* which parameter to query */
	u32 value;		/* 32-bit user pointer to the result int */
} drm_i915_getparam32_t;
135
/*
 * 32-bit compat handler for DRM_I915_GETPARAM: widen the 32-bit 'value'
 * result pointer into the native drm_i915_getparam_t and forward the
 * request through the regular ioctl path.
 *
 * Returns 0 on success or -EFAULT if a userspace access fails.
 */
static int compat_i915_getparam(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_i915_getparam32_t req32;
	drm_i915_getparam_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	/* Stage the native-sized request in the compat userspace area. */
	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.param, &request->param)
	    || __put_user((void __user *)(unsigned long)req32.value,
			  &request->value))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
}
155
/*
 * 32-bit layout of drm_i915_mem_alloc_t; 'region_offset' is a 32-bit
 * user pointer (presumably receives the allocated offset — verify
 * against the native handler in i915_mem.c).
 */
typedef struct drm_i915_mem_alloc32 {
	int region;		/* memory region to allocate from */
	int alignment;		/* requested alignment of the allocation */
	int size;		/* requested size in bytes */
	u32 region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc32_t;
162
163static int compat_i915_alloc(struct file *file, unsigned int cmd,
164 unsigned long arg)
165{
166 drm_i915_mem_alloc32_t req32;
167 drm_i915_mem_alloc_t __user *request;
168
169 if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
170 return -EFAULT;
171
172 request = compat_alloc_user_space(sizeof(*request));
173 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
174 || __put_user(req32.region, &request->region)
175 || __put_user(req32.alignment, &request->alignment)
176 || __put_user(req32.size, &request->size)
177 || __put_user((void __user *)(unsigned long)req32.region_offset,
178 &request->region_offset))
179 return -EFAULT;
180
181 return drm_ioctl(file->f_path.dentry->d_inode, file,
182 DRM_IOCTL_I915_ALLOC, (unsigned long)request);
183}
184
/*
 * Table of driver-private ioctls that need 32-to-64-bit translation,
 * indexed by ioctl number relative to DRM_COMMAND_BASE.  Numbers with
 * no entry here fall through to drm_ioctl() untranslated (see
 * i915_compat_ioctl below).
 */
drm_ioctl_compat_t *i915_compat_ioctls[] = {
	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
	[DRM_I915_GETPARAM] = compat_i915_getparam,
	[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
	[DRM_I915_ALLOC] = compat_i915_alloc
};
192
193/**
194 * Called whenever a 32-bit process running under a 64-bit kernel
195 * performs an ioctl on /dev/dri/card<n>.
196 *
197 * \param filp file pointer.
198 * \param cmd command.
199 * \param arg user argument.
200 * \return zero on success or negative number on failure.
201 */
202long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
203{
204 unsigned int nr = DRM_IOCTL_NR(cmd);
205 drm_ioctl_compat_t *fn = NULL;
206 int ret;
207
208 if (nr < DRM_COMMAND_BASE)
209 return drm_compat_ioctl(filp, cmd, arg);
210
211 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
212 fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
213
214 lock_kernel(); /* XXX for now */
215 if (fn != NULL)
216 ret = (*fn) (filp, cmd, arg);
217 else
218 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
219 unlock_kernel();
220
221 return ret;
222}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
new file mode 100644
index 000000000000..df036118b8b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -0,0 +1,623 @@
1/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
/* Bits in the interrupt identity/enable registers (accessed 16-bit in
 * this file via I915_READ16/I915_WRITE16).
 */
#define USER_INT_FLAG (1<<1)	/* ring-emitted user interrupt */
#define VSYNC_PIPEB_FLAG (1<<5)	/* vertical blank, pipe B */
#define VSYNC_PIPEA_FLAG (1<<7)	/* vertical blank, pipe A */

#define MAX_NOPID ((u32)~0)
39
40/**
41 * Emit blits for scheduled buffer swaps.
42 *
43 * This function will be called with the HW lock held.
44 */
45static void i915_vblank_tasklet(struct drm_device *dev)
46{
47 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
48 unsigned long irqflags;
49 struct list_head *list, *tmp, hits, *hit;
50 int nhits, nrects, slice[2], upper[2], lower[2], i;
51 unsigned counter[2] = { atomic_read(&dev->vbl_received),
52 atomic_read(&dev->vbl_received2) };
53 struct drm_drawable_info *drw;
54 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
55 u32 cpp = dev_priv->cpp;
56 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
57 XY_SRC_COPY_BLT_WRITE_ALPHA |
58 XY_SRC_COPY_BLT_WRITE_RGB)
59 : XY_SRC_COPY_BLT_CMD;
60 u32 src_pitch = sarea_priv->pitch * cpp;
61 u32 dst_pitch = sarea_priv->pitch * cpp;
62 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
63 RING_LOCALS;
64
65 if (IS_I965G(dev) && sarea_priv->front_tiled) {
66 cmd |= XY_SRC_COPY_BLT_DST_TILED;
67 dst_pitch >>= 2;
68 }
69 if (IS_I965G(dev) && sarea_priv->back_tiled) {
70 cmd |= XY_SRC_COPY_BLT_SRC_TILED;
71 src_pitch >>= 2;
72 }
73
74 DRM_DEBUG("\n");
75
76 INIT_LIST_HEAD(&hits);
77
78 nhits = nrects = 0;
79
80 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
81
82 /* Find buffer swaps scheduled for this vertical blank */
83 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
84 drm_i915_vbl_swap_t *vbl_swap =
85 list_entry(list, drm_i915_vbl_swap_t, head);
86
87 if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
88 continue;
89
90 list_del(list);
91 dev_priv->swaps_pending--;
92
93 spin_unlock(&dev_priv->swaps_lock);
94 spin_lock(&dev->drw_lock);
95
96 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
97
98 if (!drw) {
99 spin_unlock(&dev->drw_lock);
100 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
101 spin_lock(&dev_priv->swaps_lock);
102 continue;
103 }
104
105 list_for_each(hit, &hits) {
106 drm_i915_vbl_swap_t *swap_cmp =
107 list_entry(hit, drm_i915_vbl_swap_t, head);
108 struct drm_drawable_info *drw_cmp =
109 drm_get_drawable_info(dev, swap_cmp->drw_id);
110
111 if (drw_cmp &&
112 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
113 list_add_tail(list, hit);
114 break;
115 }
116 }
117
118 spin_unlock(&dev->drw_lock);
119
120 /* List of hits was empty, or we reached the end of it */
121 if (hit == &hits)
122 list_add_tail(list, hits.prev);
123
124 nhits++;
125
126 spin_lock(&dev_priv->swaps_lock);
127 }
128
129 if (nhits == 0) {
130 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
131 return;
132 }
133
134 spin_unlock(&dev_priv->swaps_lock);
135
136 i915_kernel_lost_context(dev);
137
138 if (IS_I965G(dev)) {
139 BEGIN_LP_RING(4);
140
141 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
142 OUT_RING(0);
143 OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
144 OUT_RING(0);
145 ADVANCE_LP_RING();
146 } else {
147 BEGIN_LP_RING(6);
148
149 OUT_RING(GFX_OP_DRAWRECT_INFO);
150 OUT_RING(0);
151 OUT_RING(0);
152 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
153 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
154 OUT_RING(0);
155
156 ADVANCE_LP_RING();
157 }
158
159 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
160
161 upper[0] = upper[1] = 0;
162 slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
163 slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
164 lower[0] = sarea_priv->pipeA_y + slice[0];
165 lower[1] = sarea_priv->pipeB_y + slice[0];
166
167 spin_lock(&dev->drw_lock);
168
169 /* Emit blits for buffer swaps, partitioning both outputs into as many
170 * slices as there are buffer swaps scheduled in order to avoid tearing
171 * (based on the assumption that a single buffer swap would always
172 * complete before scanout starts).
173 */
174 for (i = 0; i++ < nhits;
175 upper[0] = lower[0], lower[0] += slice[0],
176 upper[1] = lower[1], lower[1] += slice[1]) {
177 if (i == nhits)
178 lower[0] = lower[1] = sarea_priv->height;
179
180 list_for_each(hit, &hits) {
181 drm_i915_vbl_swap_t *swap_hit =
182 list_entry(hit, drm_i915_vbl_swap_t, head);
183 struct drm_clip_rect *rect;
184 int num_rects, pipe;
185 unsigned short top, bottom;
186
187 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
188
189 if (!drw)
190 continue;
191
192 rect = drw->rects;
193 pipe = swap_hit->pipe;
194 top = upper[pipe];
195 bottom = lower[pipe];
196
197 for (num_rects = drw->num_rects; num_rects--; rect++) {
198 int y1 = max(rect->y1, top);
199 int y2 = min(rect->y2, bottom);
200
201 if (y1 >= y2)
202 continue;
203
204 BEGIN_LP_RING(8);
205
206 OUT_RING(cmd);
207 OUT_RING(ropcpp | dst_pitch);
208 OUT_RING((y1 << 16) | rect->x1);
209 OUT_RING((y2 << 16) | rect->x2);
210 OUT_RING(sarea_priv->front_offset);
211 OUT_RING((y1 << 16) | rect->x1);
212 OUT_RING(src_pitch);
213 OUT_RING(sarea_priv->back_offset);
214
215 ADVANCE_LP_RING();
216 }
217 }
218 }
219
220 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
221
222 list_for_each_safe(hit, tmp, &hits) {
223 drm_i915_vbl_swap_t *swap_hit =
224 list_entry(hit, drm_i915_vbl_swap_t, head);
225
226 list_del(hit);
227
228 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
229 }
230}
231
/* Top-level interrupt handler: acks user-interrupt and vblank sources,
 * updates the breadcrumb in the sarea, wakes waiters, and kicks the
 * swap tasklet when swaps are pending.
 */
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 temp;
	u32 pipea_stats, pipeb_stats;

	pipea_stats = I915_READ(I915REG_PIPEASTAT);
	pipeb_stats = I915_READ(I915REG_PIPEBSTAT);

	temp = I915_READ16(I915REG_INT_IDENTITY_R);

	/* Keep only the sources this driver handles. */
	temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);

	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);

	/* Not our interrupt. */
	if (temp == 0)
		return IRQ_NONE;

	/* Ack the sources; the read-back flushes the posted write. */
	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
	(void) I915_READ16(I915REG_INT_IDENTITY_R);
	DRM_READMEMORYBARRIER();

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	if (temp & USER_INT_FLAG)
		DRM_WAKEUP(&dev_priv->irq_queue);

	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
		int vblank_pipe = dev_priv->vblank_pipe;

		/* With both pipes enabled each feeds its own counter;
		 * otherwise the single enabled pipe feeds the primary one.
		 */
		if ((vblank_pipe &
		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
			if (temp & VSYNC_PIPEA_FLAG)
				atomic_inc(&dev->vbl_received);
			if (temp & VSYNC_PIPEB_FLAG)
				atomic_inc(&dev->vbl_received2);
		} else if (((temp & VSYNC_PIPEA_FLAG) &&
			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
			   ((temp & VSYNC_PIPEB_FLAG) &&
			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
			atomic_inc(&dev->vbl_received);

		DRM_WAKEUP(&dev->vbl_queue);
		drm_vbl_send_signals(dev);

		if (dev_priv->swaps_pending > 0)
			drm_locked_tasklet(dev, i915_vblank_tasklet);
		/* Clear vblank status while keeping the interrupt enabled. */
		I915_WRITE(I915REG_PIPEASTAT,
			pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
			I915_VBLANK_CLEAR);
		I915_WRITE(I915REG_PIPEBSTAT,
			pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
			I915_VBLANK_CLEAR);
	}

	return IRQ_HANDLED;
}
291
/* Emit a breadcrumb store plus a user interrupt into the ring and
 * return the emitted sequence number.  Touches the ring, so the HW
 * lock must be held by the caller.
 */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("\n");

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	/* Wrap before the counter overflows a signed 32-bit value. */
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

	BEGIN_LP_RING(6);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);	/* breadcrumb store index; pairs with READ_BREADCRUMB() */
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(GFX_OP_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}
317
/* Block until the breadcrumb reaches irq_nr, with a 3 second timeout.
 * Returns 0 on success or the DRM_WAIT_ON error (-EBUSY on timeout).
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = 0;

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Already satisfied; no need to sleep. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
		return 0;

	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
342
/* Wait up to 3 seconds for *counter to pass *sequence, then write the
 * counter value observed back into *sequence.  The unsigned subtraction
 * keeps the comparison correct across counter wraparound.
 */
static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
				      atomic_t *counter)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned int cur_vblank;
	int ret = 0;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* Note: the condition also captures the counter into cur_vblank. */
	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
		    (((cur_vblank = atomic_read(counter))
		      - *sequence) <= (1<<23)));

	*sequence = cur_vblank;

	return ret;
}
363
364
/* Wait for a vblank on the primary counter (see the interrupt handler
 * for which pipe(s) feed it).
 */
int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}
369
/* Wait for a vblank on the secondary counter (fed by pipe B when both
 * pipes are enabled — see the interrupt handler).
 */
int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}
374
/* Needs the lock as it touches the ring.
 *
 * ioctl: emit a breadcrumb + user interrupt and copy the emitted
 * sequence number to userspace via emit->irq_seq.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	result = i915_emit_irq(dev);

	/* Hand the sequence number back to userspace. */
	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
400
/* Doesn't need the hardware lock.
 *
 * ioctl: block until the breadcrumb reaches irqwait->irq_seq.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}
416
417static void i915_enable_interrupt (struct drm_device *dev)
418{
419 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
420 u16 flag;
421
422 flag = 0;
423 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
424 flag |= VSYNC_PIPEA_FLAG;
425 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
426 flag |= VSYNC_PIPEB_FLAG;
427
428 I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
429}
430
/* Set the vblank monitor pipe
 *
 * ioctl: select which pipe(s) generate vblank interrupts, then
 * reprogram the interrupt enable register to match.
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* Reject any bits beyond the two known pipes. */
	if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
		DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
		return -EINVAL;
	}

	dev_priv->vblank_pipe = pipe->pipe;

	i915_enable_interrupt (dev);

	return 0;
}
455
/* ioctl: report which pipes currently have vblank interrupts enabled,
 * derived from the hardware enable register rather than the cached
 * dev_priv->vblank_pipe.
 */
int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;
	u16 flag;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* NOTE(review): 32-bit read here, while this register is written
	 * 16-bit elsewhere in this file; the value is truncated to u16.
	 * Harmless for these flag bits, but confirm the access width.
	 */
	flag = I915_READ(I915REG_INT_ENABLE_R);
	pipe->pipe = 0;
	if (flag & VSYNC_PIPEA_FLAG)
		pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
	if (flag & VSYNC_PIPEB_FLAG)
		pipe->pipe |= DRM_I915_VBLANK_PIPE_B;

	return 0;
}
477
478/**
479 * Schedule buffer swap at given vertical blank.
480 */
481int i915_vblank_swap(struct drm_device *dev, void *data,
482 struct drm_file *file_priv)
483{
484 drm_i915_private_t *dev_priv = dev->dev_private;
485 drm_i915_vblank_swap_t *swap = data;
486 drm_i915_vbl_swap_t *vbl_swap;
487 unsigned int pipe, seqtype, curseq;
488 unsigned long irqflags;
489 struct list_head *list;
490
491 if (!dev_priv) {
492 DRM_ERROR("%s called with no initialization\n", __func__);
493 return -EINVAL;
494 }
495
496 if (dev_priv->sarea_priv->rotation) {
497 DRM_DEBUG("Rotation not supported\n");
498 return -EINVAL;
499 }
500
501 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
502 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
503 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
504 return -EINVAL;
505 }
506
507 pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
508
509 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
510
511 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
512 DRM_ERROR("Invalid pipe %d\n", pipe);
513 return -EINVAL;
514 }
515
516 spin_lock_irqsave(&dev->drw_lock, irqflags);
517
518 if (!drm_get_drawable_info(dev, swap->drawable)) {
519 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
520 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
521 return -EINVAL;
522 }
523
524 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
525
526 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
527
528 if (seqtype == _DRM_VBLANK_RELATIVE)
529 swap->sequence += curseq;
530
531 if ((curseq - swap->sequence) <= (1<<23)) {
532 if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
533 swap->sequence = curseq + 1;
534 } else {
535 DRM_DEBUG("Missed target sequence\n");
536 return -EINVAL;
537 }
538 }
539
540 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
541
542 list_for_each(list, &dev_priv->vbl_swaps.head) {
543 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
544
545 if (vbl_swap->drw_id == swap->drawable &&
546 vbl_swap->pipe == pipe &&
547 vbl_swap->sequence == swap->sequence) {
548 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
549 DRM_DEBUG("Already scheduled\n");
550 return 0;
551 }
552 }
553
554 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
555
556 if (dev_priv->swaps_pending >= 100) {
557 DRM_DEBUG("Too many swaps queued\n");
558 return -EBUSY;
559 }
560
561 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
562
563 if (!vbl_swap) {
564 DRM_ERROR("Failed to allocate memory to queue swap\n");
565 return -ENOMEM;
566 }
567
568 DRM_DEBUG("\n");
569
570 vbl_swap->drw_id = swap->drawable;
571 vbl_swap->pipe = pipe;
572 vbl_swap->sequence = swap->sequence;
573
574 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
575
576 list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
577 dev_priv->swaps_pending++;
578
579 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
580
581 return 0;
582}
583
/* drm_dma.h hooks
*/
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	/* Quiesce interrupt sources before the handler is installed.
	 * (HWSTAM is set to 0xfffe rather than 0xffff — presumably to
	 * leave one status bit live; TODO confirm against hardware docs.)
	 */
	I915_WRITE16(I915REG_HWSTAM, 0xfffe);
	I915_WRITE16(I915REG_INT_MASK_R, 0x0);
	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}
594
/* Initialize swap-queue state and enable the interrupt sources once the
 * handler is installed.
 */
void i915_driver_irq_postinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	spin_lock_init(&dev_priv->swaps_lock);
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending = 0;

	/* Default to pipe A if userspace never selected a vblank pipe. */
	if (!dev_priv->vblank_pipe)
		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
	i915_enable_interrupt(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}
608
/* Mask and disable all interrupt sources, then ack anything still
 * pending in the identity register.
 */
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 temp;

	if (!dev_priv)
		return;

	I915_WRITE16(I915REG_HWSTAM, 0xffff);
	I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);

	/* Write back the pending bits to clear them. */
	temp = I915_READ16(I915REG_INT_IDENTITY_R);
	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
}
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
new file mode 100644
index 000000000000..6126a60dc9cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mem.c
@@ -0,0 +1,386 @@
1/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34/* This memory manager is integrated into the global/local lru
35 * mechanisms used by the clients. Specifically, it operates by
36 * setting the 'in_use' fields of the global LRU to indicate whether
37 * this region is privately allocated to a client.
38 *
39 * This does require the client to actually respect that field.
40 *
41 * Currently no effort is made to allocate 'private' memory in any
42 * clever way - the LRU information isn't used to determine which
43 * block to allocate, and the ring is drained prior to allocations --
44 * in other words allocation is expensive.
45 */
/* Mark (or unmark) every texture LRU region overlapped by block 'p' as
 * privately in use, bump their age, and move them to the head of the
 * shared LRU list in the sarea so clients observe the change.
 */
static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_tex_region *list;
	unsigned shift, nr;
	unsigned start;
	unsigned end;
	unsigned i;
	int age;

	shift = dev_priv->tex_lru_log_granularity;
	nr = I915_NR_TEX_REGIONS;

	/* Region indices covered by [p->start, p->start + p->size). */
	start = p->start >> shift;
	end = (p->start + p->size - 1) >> shift;

	age = ++sarea_priv->texAge;
	list = sarea_priv->texList;

	/* Mark the regions with the new flag and update their age. Move
	 * them to head of list to preserve LRU semantics.
	 */
	for (i = start; i <= end; i++) {
		list[i].in_use = in_use;
		list[i].age = age;

		/* remove_from_list(i)
		 */
		list[(unsigned)list[i].next].prev = list[i].prev;
		list[(unsigned)list[i].prev].next = list[i].next;

		/* insert_at_head(list, i)
		 * (entry 'nr' acts as the list head sentinel)
		 */
		list[i].prev = nr;
		list[i].next = list[nr].next;
		list[(unsigned)list[nr].next].prev = i;
		list[nr].next = i;
	}
}
86
87/* Very simple allocator for agp memory, working on a static range
88 * already mapped into each client's address space.
89 */
90
/* Claim [start, start+size) out of free block 'p' for file_priv,
 * splitting off leading and trailing remainders as new free blocks.
 * If allocating a remainder node fails, the caller simply receives a
 * larger block than requested (the 'out' path still claims it).
 */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
			drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		/* Continue splitting the tail piece. */
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
			drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}
132
133static struct mem_block *alloc_block(struct mem_block *heap, int size,
134 int align2, struct drm_file *file_priv)
135{
136 struct mem_block *p;
137 int mask = (1 << align2) - 1;
138
139 for (p = heap->next; p != heap; p = p->next) {
140 int start = (p->start + mask) & ~mask;
141 if (p->file_priv == NULL && start + size <= p->start + p->size)
142 return split_block(p, start, size, file_priv);
143 }
144
145 return NULL;
146}
147
148static struct mem_block *find_block(struct mem_block *heap, int start)
149{
150 struct mem_block *p;
151
152 for (p = heap->next; p != heap; p = p->next)
153 if (p->start == start)
154 return p;
155
156 return NULL;
157}
158
159static void free_block(struct mem_block *p)
160{
161 p->file_priv = NULL;
162
163 /* Assumes a single contiguous range. Needs a special file_priv in
164 * 'heap' to stop it being subsumed.
165 */
166 if (p->next->file_priv == NULL) {
167 struct mem_block *q = p->next;
168 p->size += q->size;
169 p->next = q->next;
170 p->next->prev = p;
171 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
172 }
173
174 if (p->prev->file_priv == NULL) {
175 struct mem_block *q = p->prev;
176 q->size += p->size;
177 q->next = p->next;
178 q->next->prev = q;
179 drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
180 }
181}
182
183/* Initialize. How to check for an uninitialized heap?
184 */
185static int init_heap(struct mem_block **heap, int start, int size)
186{
187 struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
188
189 if (!blocks)
190 return -ENOMEM;
191
192 *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
193 if (!*heap) {
194 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
195 return -ENOMEM;
196 }
197
198 blocks->start = start;
199 blocks->size = size;
200 blocks->file_priv = NULL;
201 blocks->next = blocks->prev = *heap;
202
203 memset(*heap, 0, sizeof(**heap));
204 (*heap)->file_priv = (struct drm_file *) - 1;
205 (*heap)->next = (*heap)->prev = blocks;
206 return 0;
207}
208
/* Free all blocks associated with the releasing file.
 */
void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
		      struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	/* Pass 1: disown every block belonging to this file and clear its
	 * in_use marks in the shared texture LRU.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		if (p->file_priv == file_priv) {
			p->file_priv = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range. Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	/* Pass 2: coalesce runs of adjacent free blocks. */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->file_priv == NULL && p->next->file_priv == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}
239
240/* Shutdown.
241 */
242void i915_mem_takedown(struct mem_block **heap)
243{
244 struct mem_block *p;
245
246 if (!*heap)
247 return;
248
249 for (p = (*heap)->next; p != *heap;) {
250 struct mem_block *q = p;
251 p = p->next;
252 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
253 }
254
255 drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
256 *heap = NULL;
257}
258
259static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
260{
261 switch (region) {
262 case I915_MEM_REGION_AGP:
263 return &dev_priv->agp_heap;
264 default:
265 return NULL;
266 }
267}
268
269/* IOCTL HANDLERS */
270
/* ioctl: allocate a block from a region's heap and return its start
 * offset to userspace through alloc->region_offset.
 */
int i915_mem_alloc(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_alloc_t *alloc = data;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, alloc->region);
	if (!heap || !*heap)
		return -EFAULT;

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	if (alloc->alignment < 12)
		alloc->alignment = 12;

	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);

	if (!block)
		return -ENOMEM;

	mark_block(dev, block, 1);

	/* NOTE(review): if this copy-out fails the block is not freed —
	 * userspace never learns the offset but the block stays allocated.
	 */
	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
			     sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
308
309int i915_mem_free(struct drm_device *dev, void *data,
310 struct drm_file *file_priv)
311{
312 drm_i915_private_t *dev_priv = dev->dev_private;
313 drm_i915_mem_free_t *memfree = data;
314 struct mem_block *block, **heap;
315
316 if (!dev_priv) {
317 DRM_ERROR("called with no initialization\n");
318 return -EINVAL;
319 }
320
321 heap = get_heap(dev_priv, memfree->region);
322 if (!heap || !*heap)
323 return -EFAULT;
324
325 block = find_block(*heap, memfree->region_offset);
326 if (!block)
327 return -EFAULT;
328
329 if (block->file_priv != file_priv)
330 return -EPERM;
331
332 mark_block(dev, block, 0);
333 free_block(block);
334 return 0;
335}
336
337int i915_mem_init_heap(struct drm_device *dev, void *data,
338 struct drm_file *file_priv)
339{
340 drm_i915_private_t *dev_priv = dev->dev_private;
341 drm_i915_mem_init_heap_t *initheap = data;
342 struct mem_block **heap;
343
344 if (!dev_priv) {
345 DRM_ERROR("called with no initialization\n");
346 return -EINVAL;
347 }
348
349 heap = get_heap(dev_priv, initheap->region);
350 if (!heap)
351 return -EFAULT;
352
353 if (*heap) {
354 DRM_ERROR("heap already initialized?");
355 return -EFAULT;
356 }
357
358 return init_heap(heap, initheap->start, initheap->size);
359}
360
361int i915_mem_destroy_heap( struct drm_device *dev, void *data,
362 struct drm_file *file_priv )
363{
364 drm_i915_private_t *dev_priv = dev->dev_private;
365 drm_i915_mem_destroy_heap_t *destroyheap = data;
366 struct mem_block **heap;
367
368 if ( !dev_priv ) {
369 DRM_ERROR( "called with no initialization\n" );
370 return -EINVAL;
371 }
372
373 heap = get_heap( dev_priv, destroyheap->region );
374 if (!heap) {
375 DRM_ERROR("get_heap failed");
376 return -EFAULT;
377 }
378
379 if (!*heap) {
380 DRM_ERROR("heap not initialized?");
381 return -EFAULT;
382 }
383
384 i915_mem_takedown( heap );
385 return 0;
386}