-rw-r--r--	arch/x86/mm/highmem_32.c	1
-rw-r--r--	drivers/gpu/drm/Kconfig	3
-rw-r--r--	drivers/gpu/drm/Makefile	5
-rw-r--r--	drivers/gpu/drm/drm_agpsupport.c	52
-rw-r--r--	drivers/gpu/drm/drm_cache.c	69
-rw-r--r--	drivers/gpu/drm/drm_drv.c	6
-rw-r--r--	drivers/gpu/drm/drm_fops.c	8
-rw-r--r--	drivers/gpu/drm/drm_gem.c	421
-rw-r--r--	drivers/gpu/drm/drm_irq.c	464
-rw-r--r--	drivers/gpu/drm/drm_memory.c	2
-rw-r--r--	drivers/gpu/drm/drm_mm.c	5
-rw-r--r--	drivers/gpu/drm/drm_proc.c	135
-rw-r--r--	drivers/gpu/drm/drm_stub.c	11
-rw-r--r--	drivers/gpu/drm/drm_sysfs.c	2
-rw-r--r--	drivers/gpu/drm/i915/Makefile	7
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c	332
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	476
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	1180
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	2558
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_debug.c	201
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_proc.c	292
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_tiling.c	257
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	514
-rw-r--r--	drivers/gpu/drm/i915/i915_opregion.c	371
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	1417
-rw-r--r--	drivers/gpu/drm/i915/i915_suspend.c	509
-rw-r--r--	drivers/gpu/drm/mga/mga_drv.c	29
-rw-r--r--	drivers/gpu/drm/mga/mga_drv.h	6
-rw-r--r--	drivers/gpu/drm/mga/mga_irq.c	74
-rw-r--r--	drivers/gpu/drm/mga/mga_state.c	2
-rw-r--r--	drivers/gpu/drm/r128/r128_drv.c	29
-rw-r--r--	drivers/gpu/drm/r128/r128_drv.h	11
-rw-r--r--	drivers/gpu/drm/r128/r128_irq.c	55
-rw-r--r--	drivers/gpu/drm/r128/r128_state.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cp.c	53
-rw-r--r--	drivers/gpu/drm/radeon/radeon_drv.c	32
-rw-r--r--	drivers/gpu/drm/radeon/radeon_drv.h	57
-rw-r--r--	drivers/gpu/drm/radeon/radeon_irq.c	268
-rw-r--r--	drivers/gpu/drm/radeon/radeon_state.c	2
-rw-r--r--	drivers/gpu/drm/sis/sis_mm.c	10
-rw-r--r--	drivers/gpu/drm/via/via_drv.c	26
-rw-r--r--	drivers/gpu/drm/via/via_drv.h	16
-rw-r--r--	drivers/gpu/drm/via/via_irq.c	105
-rw-r--r--	drivers/gpu/drm/via/via_mm.c	3
-rw-r--r--	include/drm/drm.h	63
-rw-r--r--	include/drm/drmP.h	249
-rw-r--r--	include/drm/drm_pciids.h	54
-rw-r--r--	include/drm/i915_drm.h	333
-rw-r--r--	mm/shmem.c	1
49 files changed, 8814 insertions(+), 1964 deletions(-)
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 165c871ba9af..bcc079c282dd 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -137,6 +137,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 
 	return (void*) vaddr;
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 610d6fd5bb50..9097500de5f4 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
 #
 menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
-	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
+	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && SHMEM
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
@@ -87,6 +87,7 @@ config DRM_MGA
 config DRM_SIS
 	tristate "SiS video cards"
 	depends on DRM && AGP
+	depends on FB_SIS || FB_SIS=n
 	help
 	  Choose this option if you have a SiS 630 or compatible video
 	  chipset. If M is selected the module will be called sis. AGP
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e9f9a97ae00a..74da99495e21 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,8 +4,9 @@
 
 ccflags-y := -Iinclude/drm
 
-drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
-	drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
+drm-y := drm_auth.o drm_bufs.o drm_cache.o \
+	drm_context.o drm_dma.o drm_drawable.o \
+	drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
 	drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 	drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 	drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index aefa5ac4c0b1..3d33b8252b58 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,6 +33,7 @@
 
 #include "drmP.h"
 #include <linux/module.h>
+#include <asm/agp.h>
 
 #if __OS_HAS_AGP
 
@@ -452,4 +453,53 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
 	return agp_unbind_memory(handle);
 }
 
-#endif /* __OS_HAS_AGP */
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+		   struct page **pages,
+		   unsigned long num_pages,
+		   uint32_t gtt_offset,
+		   u32 type)
+{
+	DRM_AGP_MEM *mem;
+	int ret, i;
+
+	DRM_DEBUG("\n");
+
+	mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+				      type);
+	if (mem == NULL) {
+		DRM_ERROR("Failed to allocate memory for %ld pages\n",
+			  num_pages);
+		return NULL;
+	}
+
+	for (i = 0; i < num_pages; i++)
+		mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
+	mem->page_count = num_pages;
+
+	mem->is_flushed = true;
+	ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+	if (ret != 0) {
+		DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+		agp_free_memory(mem);
+		return NULL;
+	}
+
+	return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
+
+void drm_agp_chipset_flush(struct drm_device *dev)
+{
+	agp_flush_chipset(dev->agp->bridge);
+}
+EXPORT_SYMBOL(drm_agp_chipset_flush);
+
+#endif /* __OS_HAS_AGP */
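
A note on usage: the two helpers above are what a GEM-style driver calls to get an object's pages into the graphics aperture and make the mapping visible. A minimal, hypothetical caller might look like the sketch below; the example_bind_object() wrapper, its error policy, and the use of AGP_USER_MEMORY as the allocation type are illustrative assumptions, not part of this patch.

/* Hypothetical sketch: bind an object's pages at a GTT offset and
 * flush the chipset so the device sees the new entries. */
static DRM_AGP_MEM *example_bind_object(struct drm_device *dev,
					struct page **pages,
					unsigned long num_pages,
					uint32_t gtt_offset)
{
	DRM_AGP_MEM *mem;

	mem = drm_agp_bind_pages(dev, pages, num_pages, gtt_offset,
				 AGP_USER_MEMORY);	/* assumed type */
	if (mem == NULL)
		return NULL;

	/* Flush chipset write buffers before the GPU uses the pages. */
	drm_agp_chipset_flush(dev);
	return mem;
}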
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
new file mode 100644
index 000000000000..0e994a0e46d4
--- /dev/null
+++ b/drivers/gpu/drm/drm_cache.c
@@ -0,0 +1,69 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+#if defined(CONFIG_X86)
+static void
+drm_clflush_page(struct page *page)
+{
+	uint8_t *page_virtual;
+	unsigned int i;
+
+	if (unlikely(page == NULL))
+		return;
+
+	page_virtual = kmap_atomic(page, KM_USER0);
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		clflush(page_virtual + i);
+	kunmap_atomic(page_virtual, KM_USER0);
+}
+#endif
+
+void
+drm_clflush_pages(struct page *pages[], unsigned long num_pages)
+{
+
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		unsigned long i;
+
+		mb();
+		for (i = 0; i < num_pages; ++i)
+			drm_clflush_page(*pages++);
+		mb();
+
+		return;
+	}
+
+	wbinvd();
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_pages);
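
For context, drm_clflush_pages() is meant to be called by drivers before handing CPU-written pages to a non-coherent GPU domain. A hedged sketch of a caller (the example_ wrapper is hypothetical, not part of this patch):

/* Hypothetical sketch: push CPU writes out to memory before GPU
 * access.  On x86 this issues clflush per cache line, or falls back
 * to a full wbinvd when clflush is unavailable. */
static void example_flush_for_gpu(struct page **pages,
				  unsigned long num_pages)
{
	drm_clflush_pages(pages, num_pages);
}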
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 452c2d866ec5..96f416afc3f6 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -116,7 +116,13 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 
 	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
 
+	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 851a53f1acce..0d46627663b1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -246,7 +246,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	memset(priv, 0, sizeof(*priv));
 	filp->private_data = priv;
 	priv->filp = filp;
-	priv->uid = current->euid;
+	priv->uid = current_euid();
 	priv->pid = task_pid_nr(current);
 	priv->minor = idr_find(&drm_minors_idr, minor_id);
 	priv->ioctl_count = 0;
@@ -256,6 +256,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 	INIT_LIST_HEAD(&priv->lhead);
 
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_open(dev, priv);
+
 	if (dev->driver->open) {
 		ret = dev->driver->open(dev, priv);
 		if (ret < 0)
@@ -400,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp)
 		dev->driver->reclaim_buffers(dev, file_priv);
 	}
 
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_release(dev, file_priv);
+
 	drm_fasync(-1, filp, 0);
 
 	mutex_lock(&dev->ctxlist_mutex);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
new file mode 100644
index 000000000000..ccd1afdede02
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file.  However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ *   default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ *   handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls.  The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+	spin_lock_init(&dev->object_name_lock);
+	idr_init(&dev->object_name_idr);
+	atomic_set(&dev->object_count, 0);
+	atomic_set(&dev->object_memory, 0);
+	atomic_set(&dev->pin_count, 0);
+	atomic_set(&dev->pin_memory, 0);
+	atomic_set(&dev->gtt_count, 0);
+	atomic_set(&dev->gtt_memory, 0);
+	return 0;
+}
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+	struct drm_gem_object *obj;
+
+	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+
+	obj->dev = dev;
+	obj->filp = shmem_file_setup("drm mm object", size, 0);
+	if (IS_ERR(obj->filp)) {
+		kfree(obj);
+		return NULL;
+	}
+
+	kref_init(&obj->refcount);
+	kref_init(&obj->handlecount);
+	obj->size = size;
+	if (dev->driver->gem_init_object != NULL &&
+	    dev->driver->gem_init_object(obj) != 0) {
+		fput(obj->filp);
+		kfree(obj);
+		return NULL;
+	}
+	atomic_inc(&dev->object_count);
+	atomic_add(obj->size, &dev->object_memory);
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, int handle)
+{
+	struct drm_device *dev;
+	struct drm_gem_object *obj;
+
+	/* This is gross. The idr system doesn't let us try a delete and
+	 * return an error code.  It just spews if you fail at deleting.
+	 * So, we have to grab a lock around finding the object and then
+	 * doing the delete on it and dropping the refcount, or the user
+	 * could race us to double-decrement the refcount and cause a
+	 * use-after-free later.  Given the frequency of our handle lookups,
+	 * we may want to use ida for number allocation and a hash table
+	 * for the pointers, anyway.
+	 */
+	spin_lock(&filp->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&filp->object_idr, handle);
+	if (obj == NULL) {
+		spin_unlock(&filp->table_lock);
+		return -EINVAL;
+	}
+	dev = obj->dev;
+
+	/* Release reference and decrement refcount. */
+	idr_remove(&filp->object_idr, handle);
+	spin_unlock(&filp->table_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+		      struct drm_gem_object *obj,
+		      int *handlep)
+{
+	int ret;
+
+	/*
+	 * Get the user-visible handle using idr.
+	 */
+again:
+	/* ensure there is space available to allocate a handle */
+	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+		return -ENOMEM;
+
+	/* do the allocation under our spinlock */
+	spin_lock(&file_priv->table_lock);
+	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
+	spin_unlock(&file_priv->table_lock);
+	if (ret == -EAGAIN)
+		goto again;
+
+	if (ret != 0)
+		return ret;
+
+	drm_gem_object_handle_reference(obj);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+		      int handle)
+{
+	struct drm_gem_object *obj;
+
+	spin_lock(&filp->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&filp->object_idr, handle);
+	if (obj == NULL) {
+		spin_unlock(&filp->table_lock);
+		return NULL;
+	}
+
+	drm_gem_object_reference(obj);
+
+	spin_unlock(&filp->table_lock);
+
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_close *args = data;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	ret = drm_gem_handle_delete(file_priv, args->handle);
+
+	return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_flink *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+again:
+	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
+		return -ENOMEM;
+
+	spin_lock(&dev->object_name_lock);
+	if (obj->name) {
+		args->name = obj->name;
+		spin_unlock(&dev->object_name_lock);
+		return 0;
+	}
+	ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+				&obj->name);
+	spin_unlock(&dev->object_name_lock);
+	if (ret == -EAGAIN)
+		goto again;
+
+	if (ret != 0) {
+		mutex_lock(&dev->struct_mutex);
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/*
+	 * Leave the reference from the lookup around as the
+	 * name table now holds one
+	 */
+	args->name = (uint64_t) obj->name;
+
+	return 0;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_gem_open *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+	int handle;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	spin_lock(&dev->object_name_lock);
+	obj = idr_find(&dev->object_name_idr, (int) args->name);
+	if (obj)
+		drm_gem_object_reference(obj);
+	spin_unlock(&dev->object_name_lock);
+	if (!obj)
+		return -ENOENT;
+
+	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	args->size = obj->size;
+
+	return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+	idr_init(&file_private->object_idr);
+	spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+
+	drm_gem_object_handle_unreference(obj);
+
+	return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+	mutex_lock(&dev->struct_mutex);
+	idr_for_each(&file_private->object_idr,
+		     &drm_gem_object_release_handle, NULL);
+
+	idr_destroy(&file_private->object_idr);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+	struct drm_device *dev = obj->dev;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (dev->driver->gem_free_object != NULL)
+		dev->driver->gem_free_object(obj);
+
+	fput(obj->filp);
+	atomic_dec(&dev->object_count);
+	atomic_sub(obj->size, &dev->object_memory);
+	kfree(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void
+drm_gem_object_handle_free(struct kref *kref)
+{
+	struct drm_gem_object *obj = container_of(kref,
+						  struct drm_gem_object,
+						  handlecount);
+	struct drm_device *dev = obj->dev;
+
+	/* Remove any name for this object */
+	spin_lock(&dev->object_name_lock);
+	if (obj->name) {
+		idr_remove(&dev->object_name_idr, obj->name);
+		spin_unlock(&dev->object_name_lock);
+		/*
+		 * The object name held a reference to this object, drop
+		 * that now.
+		 */
+		drm_gem_object_unreference(obj);
+	} else
+		spin_unlock(&dev->object_name_lock);
+
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
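
The flink/open pair implemented above is how two clients share one object: the owner publishes a global name for its handle, and a peer opens that name to get a handle of its own (plus the object size). A hypothetical userspace sketch follows; it assumes the drm_gem_flink/drm_gem_open structs and ioctl numbers added to include/drm/drm.h by this patch, and omits error checking.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Hypothetical sketch: share a GEM object between two open files. */
static uint32_t example_share(int exporter_fd, int importer_fd,
			      uint32_t handle)
{
	struct drm_gem_flink flink = { .handle = handle };
	struct drm_gem_open op = { 0 };

	/* Publish a global name for the handle (DRM_AUTH required). */
	ioctl(exporter_fd, DRM_IOCTL_GEM_FLINK, &flink);

	/* The second client resolves the name to its own handle. */
	op.name = flink.name;
	ioctl(importer_fd, DRM_IOCTL_GEM_OPEN, &op);

	return op.handle;	/* op.size reports the object size */
}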
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 53f0e5af1cc8..4091b9e291f9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
 		return -EINVAL;
 
-	p->irq = dev->irq;
+	p->irq = dev->pdev->irq;
 
 	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
 		  p->irq);
@@ -71,25 +71,137 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void vblank_disable_fn(unsigned long arg)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	unsigned long irqflags;
+	int i;
+
+	if (!dev->vblank_disable_allowed)
+		return;
+
+	for (i = 0; i < dev->num_crtcs; i++) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+		    dev->vblank_enabled[i]) {
+			DRM_DEBUG("disabling vblank on crtc %d\n", i);
+			dev->last_vblank[i] =
+				dev->driver->get_vblank_counter(dev, i);
+			dev->driver->disable_vblank(dev, i);
+			dev->vblank_enabled[i] = 0;
+		}
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+	}
+}
+
+static void drm_vblank_cleanup(struct drm_device *dev)
+{
+	/* Bail if the driver didn't call drm_vblank_init() */
+	if (dev->num_crtcs == 0)
+		return;
+
+	del_timer(&dev->vblank_disable_timer);
+
+	vblank_disable_fn((unsigned long)dev);
+
+	drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+
+	dev->num_crtcs = 0;
+}
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+	int i, ret = -ENOMEM;
+
+	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+		    (unsigned long)dev);
+	spin_lock_init(&dev->vbl_lock);
+	atomic_set(&dev->vbl_signal_pending, 0);
+	dev->num_crtcs = num_crtcs;
+
+	dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
+				   DRM_MEM_DRIVER);
+	if (!dev->vbl_queue)
+		goto err;
+
+	dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
+				  DRM_MEM_DRIVER);
+	if (!dev->vbl_sigs)
+		goto err;
+
+	dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
+				       DRM_MEM_DRIVER);
+	if (!dev->_vblank_count)
+		goto err;
+
+	dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
+					 DRM_MEM_DRIVER);
+	if (!dev->vblank_refcount)
+		goto err;
+
+	dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
+					 DRM_MEM_DRIVER);
+	if (!dev->vblank_enabled)
+		goto err;
+
+	dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+	if (!dev->last_vblank)
+		goto err;
+
+	dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
+					   DRM_MEM_DRIVER);
+	if (!dev->vblank_inmodeset)
+		goto err;
+
+	/* Zero per-crtc vblank stuff */
+	for (i = 0; i < num_crtcs; i++) {
+		init_waitqueue_head(&dev->vbl_queue[i]);
+		INIT_LIST_HEAD(&dev->vbl_sigs[i]);
+		atomic_set(&dev->_vblank_count[i], 0);
+		atomic_set(&dev->vblank_refcount[i], 0);
+	}
+
+	dev->vblank_disable_allowed = 0;
+
+	return 0;
+
+err:
+	drm_vblank_cleanup(dev);
+	return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
+
 /**
  * Install IRQ handler.
  *
  * \param dev DRM device.
- * \param irq IRQ number.
  *
- * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
+ * Initializes the IRQ related data. Installs the handler, calling the driver
  * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
  * before and after the installation.
  */
-static int drm_irq_install(struct drm_device * dev)
+int drm_irq_install(struct drm_device *dev)
 {
-	int ret;
+	int ret = 0;
 	unsigned long sh_flags = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 
-	if (dev->irq == 0)
+	if (dev->pdev->irq == 0)
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
@@ -107,18 +219,7 @@ static int drm_irq_install(struct drm_device * dev)
 	dev->irq_enabled = 1;
 	mutex_unlock(&dev->struct_mutex);
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
-
-	if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
-		init_waitqueue_head(&dev->vbl_queue);
-
-		spin_lock_init(&dev->vbl_lock);
-
-		INIT_LIST_HEAD(&dev->vbl_sigs);
-		INIT_LIST_HEAD(&dev->vbl_sigs2);
-
-		dev->vbl_pending = 0;
-	}
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
 	/* Before installing handler */
 	dev->driver->irq_preinstall(dev);
@@ -127,8 +228,9 @@ static int drm_irq_install(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
 		sh_flags = IRQF_SHARED;
 
-	ret = request_irq(dev->irq, dev->driver->irq_handler,
+	ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
 			  sh_flags, dev->devname, dev);
+
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
 		dev->irq_enabled = 0;
@@ -137,10 +239,16 @@ static int drm_irq_install(struct drm_device * dev)
 	}
 
 	/* After installing handler */
-	dev->driver->irq_postinstall(dev);
+	ret = dev->driver->irq_postinstall(dev);
+	if (ret < 0) {
+		mutex_lock(&dev->struct_mutex);
+		dev->irq_enabled = 0;
+		mutex_unlock(&dev->struct_mutex);
+	}
 
-	return 0;
+	return ret;
 }
+EXPORT_SYMBOL(drm_irq_install);
 
 /**
  * Uninstall the IRQ handler.
@@ -164,17 +272,18 @@ int drm_irq_uninstall(struct drm_device * dev)
 	if (!irq_enabled)
 		return -EINVAL;
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
 	dev->driver->irq_uninstall(dev);
 
-	free_irq(dev->irq, dev);
+	free_irq(dev->pdev->irq, dev);
+
+	drm_vblank_cleanup(dev);
 
 	dev->locked_tasklet_func = NULL;
 
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_irq_uninstall);
 
 /**
@@ -201,7 +310,7 @@ int drm_control(struct drm_device *dev, void *data,
 		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 			return 0;
 		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
-		    ctl->irq != dev->irq)
+		    ctl->irq != dev->pdev->irq)
 			return -EINVAL;
 		return drm_irq_install(dev);
 	case DRM_UNINST_HANDLER:
@@ -214,6 +323,174 @@ int drm_control(struct drm_device *dev, void *data,
 }
 
 /**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+	return atomic_read(&dev->_vblank_count[crtc]);
+}
+EXPORT_SYMBOL(drm_vblank_count);
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc).  Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+	u32 cur_vblank, diff;
+
+	/*
+	 * Interrupts were disabled prior to this call, so deal with counter
+	 * wrap if needed.
+	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
+	 * here if the register is small or we had vblank interrupts off for
+	 * a long time.
+	 */
+	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+	diff = cur_vblank - dev->last_vblank[crtc];
+	if (cur_vblank < dev->last_vblank[crtc]) {
+		diff += dev->max_vblank_count;
+
+		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+			  crtc, dev->last_vblank[crtc], cur_vblank, diff);
+	}
+
+	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+		  crtc, diff);
+
+	atomic_add(diff, &dev->_vblank_count[crtc]);
+}
+
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	/* Going from 0->1 means we have to enable interrupts again */
+	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
+	    !dev->vblank_enabled[crtc]) {
+		ret = dev->driver->enable_vblank(dev, crtc);
+		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+		if (ret)
+			atomic_dec(&dev->vblank_refcount[crtc]);
+		else {
+			dev->vblank_enabled[crtc] = 1;
+			drm_update_vblank_count(dev, crtc);
+		}
+	}
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_vblank_get);
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+	/* Last user schedules interrupt disable */
+	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
+		mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+}
+EXPORT_SYMBOL(drm_vblank_put);
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ *
+ * Generally the counter will reset across mode sets.  If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_modeset_ctl *modeset = data;
+	unsigned long irqflags;
+	int crtc, ret = 0;
+
+	/* If drm_vblank_init() hasn't been called yet, just no-op */
+	if (!dev->num_crtcs)
+		goto out;
+
+	crtc = modeset->crtc;
+	if (crtc >= dev->num_crtcs) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * To avoid all the problems that might happen if interrupts
+	 * were enabled/disabled around or between these calls, we just
+	 * have the kernel take a reference on the CRTC (just once though
+	 * to avoid corrupting the count if multiple, mismatch calls occur),
+	 * so that interrupts remain enabled in the interim.
+	 */
+	switch (modeset->cmd) {
+	case _DRM_PRE_MODESET:
+		if (!dev->vblank_inmodeset[crtc]) {
+			dev->vblank_inmodeset[crtc] = 1;
+			drm_vblank_get(dev, crtc);
+		}
+		break;
+	case _DRM_POST_MODESET:
+		if (dev->vblank_inmodeset[crtc]) {
+			spin_lock_irqsave(&dev->vbl_lock, irqflags);
+			dev->vblank_disable_allowed = 1;
+			dev->vblank_inmodeset[crtc] = 0;
+			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+			drm_vblank_put(dev, crtc);
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+out:
+	return ret;
+}
+
+/**
  * Wait for VBLANK.
  *
  * \param inode device inode.
@@ -232,14 +509,14 @@ int drm_control(struct drm_device *dev, void *data,
  *
  * If a signal is not requested, then calls vblank_wait().
  */
-int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_wait_vblank(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
 {
 	union drm_wait_vblank *vblwait = data;
-	struct timeval now;
 	int ret = 0;
-	unsigned int flags, seq;
+	unsigned int flags, seq, crtc;
 
-	if ((!dev->irq) || (!dev->irq_enabled))
+	if ((!dev->pdev->irq) || (!dev->irq_enabled))
 		return -EINVAL;
 
 	if (vblwait->request.type &
@@ -251,13 +528,17 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 	}
 
 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
 
-	if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
-				    DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
-		return -EINVAL;
+	if (crtc >= dev->num_crtcs)
+		return -EINVAL;
 
-	seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
-			  : &dev->vbl_received);
+	ret = drm_vblank_get(dev, crtc);
+	if (ret) {
+		DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
+		return ret;
+	}
+	seq = drm_vblank_count(dev, crtc);
 
 	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
 	case _DRM_VBLANK_RELATIVE:
@@ -266,7 +547,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 	case _DRM_VBLANK_ABSOLUTE:
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto done;
 	}
 
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -276,8 +558,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 
 	if (flags & _DRM_VBLANK_SIGNAL) {
 		unsigned long irqflags;
-		struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
-			? &dev->vbl_sigs2 : &dev->vbl_sigs;
+		struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
 		struct drm_vbl_sig *vbl_sig;
 
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -298,22 +579,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 			}
 		}
 
-		if (dev->vbl_pending >= 100) {
+		if (atomic_read(&dev->vbl_signal_pending) >= 100) {
 			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto done;
 		}
 
-		dev->vbl_pending++;
-
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
-		if (!
-		    (vbl_sig =
-		     drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
-			return -ENOMEM;
+		vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
+				     DRM_MEM_DRIVER);
+		if (!vbl_sig) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		ret = drm_vblank_get(dev, crtc);
+		if (ret) {
+			drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
+				 DRM_MEM_DRIVER);
+			return ret;
 		}
 
-		memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
+		atomic_inc(&dev->vbl_signal_pending);
 
 		vbl_sig->sequence = vblwait->request.sequence;
 		vbl_sig->info.si_signo = vblwait->request.signal;
@@ -327,20 +615,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 
 		vblwait->reply.sequence = seq;
 	} else {
-		if (flags & _DRM_VBLANK_SECONDARY) {
-			if (dev->driver->vblank_wait2)
-				ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
-		} else if (dev->driver->vblank_wait)
-			ret =
-			    dev->driver->vblank_wait(dev,
-						     &vblwait->request.sequence);
-
-		do_gettimeofday(&now);
-		vblwait->reply.tval_sec = now.tv_sec;
-		vblwait->reply.tval_usec = now.tv_usec;
+		DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+			  vblwait->request.sequence, crtc);
+		DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+			    ((drm_vblank_count(dev, crtc)
+			      - vblwait->request.sequence) <= (1 << 23)));
+
+		if (ret != -EINTR) {
+			struct timeval now;
+
+			do_gettimeofday(&now);
+
+			vblwait->reply.tval_sec = now.tv_sec;
+			vblwait->reply.tval_usec = now.tv_usec;
+			vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+			DRM_DEBUG("returning %d to client\n",
+				  vblwait->reply.sequence);
+		} else {
+			DRM_DEBUG("vblank wait interrupted by signal\n");
+		}
 	}
 
-  done:
+done:
+	drm_vblank_put(dev, crtc);
 	return ret;
 }
 
@@ -348,44 +645,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
  * Send the VBLANK signals.
  *
  * \param dev DRM device.
+ * \param crtc CRTC where the vblank event occurred
  *
  * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
  *
  * If a signal is not requested, then calls vblank_wait().
  */
-void drm_vbl_send_signals(struct drm_device * dev)
+static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
 {
+	struct drm_vbl_sig *vbl_sig, *tmp;
+	struct list_head *vbl_sigs;
+	unsigned int vbl_seq;
 	unsigned long flags;
-	int i;
 
 	spin_lock_irqsave(&dev->vbl_lock, flags);
 
-	for (i = 0; i < 2; i++) {
-		struct drm_vbl_sig *vbl_sig, *tmp;
-		struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
-		unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
-						   &dev->vbl_received);
+	vbl_sigs = &dev->vbl_sigs[crtc];
+	vbl_seq = drm_vblank_count(dev, crtc);
 
-		list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
-			if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
-				vbl_sig->info.si_code = vbl_seq;
-				send_sig_info(vbl_sig->info.si_signo,
-					      &vbl_sig->info, vbl_sig->task);
+	list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
+		if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+			vbl_sig->info.si_code = vbl_seq;
+			send_sig_info(vbl_sig->info.si_signo,
+				      &vbl_sig->info, vbl_sig->task);
 
-				list_del(&vbl_sig->head);
-
-				drm_free(vbl_sig, sizeof(*vbl_sig),
-					 DRM_MEM_DRIVER);
+			list_del(&vbl_sig->head);
 
-				dev->vbl_pending--;
-			}
-		}
+			drm_free(vbl_sig, sizeof(*vbl_sig),
+				 DRM_MEM_DRIVER);
+			atomic_dec(&dev->vbl_signal_pending);
+			drm_vblank_put(dev, crtc);
+		}
 	}
 
 	spin_unlock_irqrestore(&dev->vbl_lock, flags);
 }
 
-EXPORT_SYMBOL(drm_vbl_send_signals);
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+void drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+	atomic_inc(&dev->_vblank_count[crtc]);
+	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	drm_vbl_send_signals(dev, crtc);
+}
+EXPORT_SYMBOL(drm_handle_vblank);
 
 /**
  * Tasklet wrapper function.
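
Under the reworked interface, a driver supplies only the hardware side -- a get_vblank_counter() hook plus enable_vblank()/disable_vblank() -- while the core owns the cooked counter, the wait queues, and the delayed-disable timer. A hypothetical driver-side sketch (the example_ functions are illustrative, not from this patch):

#include <linux/interrupt.h>
#include "drmP.h"

/* Hypothetical sketch: driver load hook sets up per-CRTC state. */
static int example_driver_load(struct drm_device *dev, unsigned long flags)
{
	return drm_vblank_init(dev, 2);	/* two CRTCs assumed */
}

/* Hypothetical sketch: the interrupt handler reports each vblank
 * event, which bumps the cooked counter, wakes drm_wait_vblank()
 * sleepers, and delivers any queued signals. */
static irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;

	drm_handle_vblank(dev, 0);	/* vblank on pipe/crtc 0 */
	return IRQ_HANDLED;
}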
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 0177012845c6..803bc9e7ce3c 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -133,6 +133,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
 {
 	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
 }
+EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -145,6 +146,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
 	return drm_agp_unbind_memory(handle);
 }
+EXPORT_SYMBOL(drm_unbind_agp);
 
 #else /* __OS_HAS_AGP */
 static inline void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index dcff9e9b52e3..217ad7dc7076 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -169,6 +169,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 
 	return child;
 }
+EXPORT_SYMBOL(drm_mm_get_block);
 
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
@@ -217,6 +218,7 @@ void drm_mm_put_block(struct drm_mm_node * cur)
 		drm_free(cur, sizeof(*cur), DRM_MEM_MM);
 	}
 }
+EXPORT_SYMBOL(drm_mm_put_block);
 
 struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 				       unsigned long size,
@@ -265,6 +267,7 @@ int drm_mm_clean(struct drm_mm * mm)
 
 	return (head->next->next == head);
 }
+EXPORT_SYMBOL(drm_mm_search_free);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
@@ -273,7 +276,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 
 	return drm_mm_create_tail_node(mm, start, size);
 }
-
+EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 93b1e0475c93..d490db4c0de0 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -49,6 +49,10 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
 			   int request, int *eof, void *data);
 static int drm_bufs_info(char *buf, char **start, off_t offset,
 			 int request, int *eof, void *data);
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+			     int request, int *eof, void *data);
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+			       int request, int *eof, void *data);
 #if DRM_DEBUG_CODE
 static int drm_vma_info(char *buf, char **start, off_t offset,
 			int request, int *eof, void *data);
@@ -60,13 +64,16 @@ static int drm_vma_info(char *buf, char **start, off_t offset,
 static struct drm_proc_list {
 	const char *name;	/**< file name */
 	int (*f) (char *, char **, off_t, int, int *, void *);		/**< proc callback*/
+	u32 driver_features; /**< Required driver features for this entry */
 } drm_proc_list[] = {
-	{"name", drm_name_info},
-	{"mem", drm_mem_info},
-	{"vm", drm_vm_info},
-	{"clients", drm_clients_info},
-	{"queues", drm_queues_info},
-	{"bufs", drm_bufs_info},
+	{"name", drm_name_info, 0},
+	{"mem", drm_mem_info, 0},
+	{"vm", drm_vm_info, 0},
+	{"clients", drm_clients_info, 0},
+	{"queues", drm_queues_info, 0},
+	{"bufs", drm_bufs_info, 0},
+	{"gem_names", drm_gem_name_info, DRIVER_GEM},
+	{"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
 	{"vma", drm_vma_info},
 #endif
@@ -90,8 +97,9 @@ static struct drm_proc_list {
 int drm_proc_init(struct drm_minor *minor, int minor_id,
 		  struct proc_dir_entry *root)
 {
+	struct drm_device *dev = minor->dev;
 	struct proc_dir_entry *ent;
-	int i, j;
+	int i, j, ret;
 	char name[64];
 
 	sprintf(name, "%d", minor_id);
@@ -102,23 +110,42 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
 	}
 
 	for (i = 0; i < DRM_PROC_ENTRIES; i++) {
+		u32 features = drm_proc_list[i].driver_features;
+
+		if (features != 0 &&
+		    (dev->driver->driver_features & features) != features)
+			continue;
+
 		ent = create_proc_entry(drm_proc_list[i].name,
 					S_IFREG | S_IRUGO, minor->dev_root);
 		if (!ent) {
 			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
 				  name, drm_proc_list[i].name);
-			for (j = 0; j < i; j++)
-				remove_proc_entry(drm_proc_list[i].name,
-						  minor->dev_root);
-			remove_proc_entry(name, root);
-			minor->dev_root = NULL;
-			return -1;
+			ret = -1;
+			goto fail;
 		}
 		ent->read_proc = drm_proc_list[i].f;
 		ent->data = minor;
 	}
 
+	if (dev->driver->proc_init) {
+		ret = dev->driver->proc_init(minor);
+		if (ret) {
+			DRM_ERROR("DRM: Driver failed to initialize "
+				  "/proc/dri.\n");
+			goto fail;
+		}
+	}
+
 	return 0;
+ fail:
+
+	for (j = 0; j < i; j++)
+		remove_proc_entry(drm_proc_list[i].name,
+				  minor->dev_root);
+	remove_proc_entry(name, root);
+	minor->dev_root = NULL;
+	return ret;
 }
 
 /**
@@ -133,12 +160,16 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
  */
 int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
 {
+	struct drm_device *dev = minor->dev;
 	int i;
 	char name[64];
 
 	if (!root || !minor->dev_root)
 		return 0;
 
+	if (dev->driver->proc_cleanup)
+		dev->driver->proc_cleanup(minor);
+
 	for (i = 0; i < DRM_PROC_ENTRIES; i++)
 		remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
 	sprintf(name, "%d", minor->index);
@@ -480,6 +511,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
 	return ret;
 }
 
+struct drm_gem_name_info_data {
+	int len;
+	char *buf;
+	int eof;
+};
+
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+	struct drm_gem_name_info_data *nid = data;
+
+	DRM_INFO("name %d size %d\n", obj->name, obj->size);
+	if (nid->eof)
+		return 0;
+
+	nid->len += sprintf(&nid->buf[nid->len],
+			    "%6d%9d%8d%9d\n",
+			    obj->name, obj->size,
+			    atomic_read(&obj->handlecount.refcount),
+			    atomic_read(&obj->refcount.refcount));
+	if (nid->len > DRM_PROC_LIMIT) {
+		nid->eof = 1;
+		return 0;
+	}
+	return 0;
+}
+
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+			     int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	struct drm_gem_name_info_data nid;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	nid.len = sprintf(buf, "  name     size handles refcount\n");
+	nid.buf = buf;
+	nid.eof = 0;
+	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
+
+	*start = &buf[offset];
+	*eof = 0;
+	if (nid.len > request + offset)
+		return request;
+	*eof = 1;
+	return nid.len - offset;
+}
+
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+			       int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
+	DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
+	DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
+	DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
+	DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+	DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
 #if DRM_DEBUG_CODE
 
 static int drm__vma_info(char *buf, char **start, off_t offset, int request,
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c2f584f3b46c..141e33004a76 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -107,7 +107,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 #ifdef __alpha__
 	dev->hose = pdev->sysdata;
 #endif
-	dev->irq = pdev->irq;
 
 	if (drm_ht_create(&dev->map_hash, 12)) {
 		return -ENOMEM;
@@ -152,6 +151,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
152 goto error_out_unreg; 151 goto error_out_unreg;
153 } 152 }
154 153
154 if (driver->driver_features & DRIVER_GEM) {
155 retcode = drm_gem_init(dev);
156 if (retcode) {
157 DRM_ERROR("Cannot initialize graphics execution "
158 "manager (GEM)\n");
159 goto error_out_unreg;
160 }
161 }
162
155 return 0; 163 return 0;
156 164
157 error_out_unreg: 165 error_out_unreg:
@@ -317,6 +325,7 @@ int drm_put_dev(struct drm_device * dev)
317int drm_put_minor(struct drm_minor **minor_p) 325int drm_put_minor(struct drm_minor **minor_p)
318{ 326{
319 struct drm_minor *minor = *minor_p; 327 struct drm_minor *minor = *minor_p;
328
320 DRM_DEBUG("release secondary minor %d\n", minor->index); 329 DRM_DEBUG("release secondary minor %d\n", minor->index);
321 330
322 if (minor->type == DRM_MINOR_LEGACY) 331 if (minor->type == DRM_MINOR_LEGACY)
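GEM initialization is gated on a new DRIVER_GEM feature bit, so legacy drivers are untouched. A sketch of the matching feature-gated teardown shape, using the existing drm_core_check_feature() helper from drmP.h (the cleanup hook named here is hypothetical; this hunk only adds the init side):

    static void example_unload(struct drm_device *dev)
    {
        if (drm_core_check_feature(dev, DRIVER_GEM))
            example_gem_teardown(dev);      /* hypothetical cleanup hook */
    }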
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index af211a0ef179..1611b9bcbe7f 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -184,7 +184,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
184err_out_files: 184err_out_files:
185 if (i > 0) 185 if (i > 0)
186 for (j = 0; j < i; j++) 186 for (j = 0; j < i; j++)
187 device_remove_file(&minor->kdev, &device_attrs[i]); 187 device_remove_file(&minor->kdev, &device_attrs[j]);
188 device_unregister(&minor->kdev); 188 device_unregister(&minor->kdev);
189err_out: 189err_out:
190 190
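The one-character drm_sysfs.c change fixes an error-unwind bug: when device_create_file() failed at index i, the cleanup loop removed device_attrs[i] over and over instead of the attributes actually created. The idiom the fix restores, sketched in isolation (loop bound illustrative):

    int i, j, ret;

    for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
        ret = device_create_file(&minor->kdev, &device_attrs[i]);
        if (ret)
            goto err_out_files;
    }
    return 0;

    err_out_files:
        /* unwind only what was created, indices 0..i-1, indexed by j */
        for (j = 0; j < i; j++)
            device_remove_file(&minor->kdev, &device_attrs[j]);
        return ret;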
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a9e60464df74..5ba78e4fd2b5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,7 +3,12 @@
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o 6i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
7 i915_suspend.o \
8 i915_gem.o \
9 i915_gem_debug.o \
10 i915_gem_proc.o \
11 i915_gem_tiling.o
7 12
8i915-$(CONFIG_COMPAT) += i915_ioc32.o 13i915-$(CONFIG_COMPAT) += i915_ioc32.o
9 14
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9ac4720e647b..db34780edbb2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,40 +40,96 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
40{ 40{
41 drm_i915_private_t *dev_priv = dev->dev_private; 41 drm_i915_private_t *dev_priv = dev->dev_private;
42 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 42 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
43 u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR; 43 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
44 u32 last_acthd = I915_READ(acthd_reg);
45 u32 acthd;
46 u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
44 int i; 47 int i;
45 48
46 for (i = 0; i < 10000; i++) { 49 for (i = 0; i < 100000; i++) {
47 ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR; 50 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
51 acthd = I915_READ(acthd_reg);
48 ring->space = ring->head - (ring->tail + 8); 52 ring->space = ring->head - (ring->tail + 8);
49 if (ring->space < 0) 53 if (ring->space < 0)
50 ring->space += ring->Size; 54 ring->space += ring->Size;
51 if (ring->space >= n) 55 if (ring->space >= n)
52 return 0; 56 return 0;
53 57
54 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 58 if (dev_priv->sarea_priv)
59 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
55 60
56 if (ring->head != last_head) 61 if (ring->head != last_head)
57 i = 0; 62 i = 0;
63 if (acthd != last_acthd)
64 i = 0;
58 65
59 last_head = ring->head; 66 last_head = ring->head;
67 last_acthd = acthd;
68 msleep_interruptible(10);
69
60 } 70 }
61 71
62 return -EBUSY; 72 return -EBUSY;
63} 73}
64 74
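The rewritten wait loop above is a progress-aware timeout: both the ring head and the instruction-parser address (ACTHD) are sampled each pass, and the give-up counter restarts whenever either one moves, so only a fully hung engine times out. The shape of the pattern, reduced to its essentials (the accessor names are placeholders):

    static int wait_with_progress(void)
    {
        u32 last_head = read_head(), last_acthd = read_acthd();
        int i;

        for (i = 0; i < 100000; i++) {
            u32 head = read_head();
            u32 acthd = read_acthd();

            if (have_space(head))
                return 0;
            if (head != last_head || acthd != last_acthd)
                i = 0;                  /* progress: restart the clock */
            last_head = head;
            last_acthd = acthd;
            msleep_interruptible(10);
        }
        return -EBUSY;
    }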
75/**
76 * Sets up the hardware status page for devices that need a physical address
77 * in the register.
78 */
79static int i915_init_phys_hws(struct drm_device *dev)
80{
81 drm_i915_private_t *dev_priv = dev->dev_private;
82 /* Program Hardware Status Page */
83 dev_priv->status_page_dmah =
84 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
85
86 if (!dev_priv->status_page_dmah) {
87 DRM_ERROR("Can not allocate hardware status page\n");
88 return -ENOMEM;
89 }
90 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
91 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
92
93 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
94
95 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
96 DRM_DEBUG("Enabled hardware status page\n");
97 return 0;
98}
99
100/**
101 * Frees the hardware status page, whether it's a physical address or a virtual
102 * address set up by the X Server.
103 */
104static void i915_free_hws(struct drm_device *dev)
105{
106 drm_i915_private_t *dev_priv = dev->dev_private;
107 if (dev_priv->status_page_dmah) {
108 drm_pci_free(dev, dev_priv->status_page_dmah);
109 dev_priv->status_page_dmah = NULL;
110 }
111
112 if (dev_priv->status_gfx_addr) {
113 dev_priv->status_gfx_addr = 0;
114 drm_core_ioremapfree(&dev_priv->hws_map, dev);
115 }
116
117 /* Need to rewrite hardware status page */
118 I915_WRITE(HWS_PGA, 0x1ffff000);
119}
120
65void i915_kernel_lost_context(struct drm_device * dev) 121void i915_kernel_lost_context(struct drm_device * dev)
66{ 122{
67 drm_i915_private_t *dev_priv = dev->dev_private; 123 drm_i915_private_t *dev_priv = dev->dev_private;
68 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 124 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
69 125
70 ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR; 126 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
71 ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR; 127 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
72 ring->space = ring->head - (ring->tail + 8); 128 ring->space = ring->head - (ring->tail + 8);
73 if (ring->space < 0) 129 if (ring->space < 0)
74 ring->space += ring->Size; 130 ring->space += ring->Size;
75 131
76 if (ring->head == ring->tail) 132 if (ring->head == ring->tail && dev_priv->sarea_priv)
77 dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; 133 dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
78} 134}
79 135
@@ -84,28 +140,19 @@ static int i915_dma_cleanup(struct drm_device * dev)
84 * may not have been called from userspace and after dev_private 140 * may not have been called from userspace and after dev_private
85 * is freed, it's too late. 141 * is freed, it's too late.
86 */ 142 */
87 if (dev->irq) 143 if (dev->irq_enabled)
88 drm_irq_uninstall(dev); 144 drm_irq_uninstall(dev);
89 145
90 if (dev_priv->ring.virtual_start) { 146 if (dev_priv->ring.virtual_start) {
91 drm_core_ioremapfree(&dev_priv->ring.map, dev); 147 drm_core_ioremapfree(&dev_priv->ring.map, dev);
92 dev_priv->ring.virtual_start = 0; 148 dev_priv->ring.virtual_start = NULL;
93 dev_priv->ring.map.handle = 0; 149 dev_priv->ring.map.handle = NULL;
94 dev_priv->ring.map.size = 0; 150 dev_priv->ring.map.size = 0;
95 } 151 }
96 152
97 if (dev_priv->status_page_dmah) { 153 /* Clear the HWS virtual address at teardown */
98 drm_pci_free(dev, dev_priv->status_page_dmah); 154 if (I915_NEED_GFX_HWS(dev))
99 dev_priv->status_page_dmah = NULL; 155 i915_free_hws(dev);
100 /* Need to rewrite hardware status page */
101 I915_WRITE(0x02080, 0x1ffff000);
102 }
103
104 if (dev_priv->status_gfx_addr) {
105 dev_priv->status_gfx_addr = 0;
106 drm_core_ioremapfree(&dev_priv->hws_map, dev);
107 I915_WRITE(0x2080, 0x1ffff000);
108 }
109 156
110 return 0; 157 return 0;
111} 158}
@@ -121,34 +168,34 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
121 return -EINVAL; 168 return -EINVAL;
122 } 169 }
123 170
124 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
125 if (!dev_priv->mmio_map) {
126 i915_dma_cleanup(dev);
127 DRM_ERROR("can not find mmio map!\n");
128 return -EINVAL;
129 }
130
131 dev_priv->sarea_priv = (drm_i915_sarea_t *) 171 dev_priv->sarea_priv = (drm_i915_sarea_t *)
132 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); 172 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
133 173
134 dev_priv->ring.Start = init->ring_start; 174 if (init->ring_size != 0) {
135 dev_priv->ring.End = init->ring_end; 175 if (dev_priv->ring.ring_obj != NULL) {
136 dev_priv->ring.Size = init->ring_size; 176 i915_dma_cleanup(dev);
137 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 177 DRM_ERROR("Client tried to initialize ringbuffer in "
178 "GEM mode\n");
179 return -EINVAL;
180 }
138 181
139 dev_priv->ring.map.offset = init->ring_start; 182 dev_priv->ring.Size = init->ring_size;
140 dev_priv->ring.map.size = init->ring_size; 183 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
141 dev_priv->ring.map.type = 0;
142 dev_priv->ring.map.flags = 0;
143 dev_priv->ring.map.mtrr = 0;
144 184
145 drm_core_ioremap(&dev_priv->ring.map, dev); 185 dev_priv->ring.map.offset = init->ring_start;
186 dev_priv->ring.map.size = init->ring_size;
187 dev_priv->ring.map.type = 0;
188 dev_priv->ring.map.flags = 0;
189 dev_priv->ring.map.mtrr = 0;
146 190
147 if (dev_priv->ring.map.handle == NULL) { 191 drm_core_ioremap(&dev_priv->ring.map, dev);
148 i915_dma_cleanup(dev); 192
149 DRM_ERROR("can not ioremap virtual address for" 193 if (dev_priv->ring.map.handle == NULL) {
150 " ring buffer\n"); 194 i915_dma_cleanup(dev);
151 return -ENOMEM; 195 DRM_ERROR("can not ioremap virtual address for"
196 " ring buffer\n");
197 return -ENOMEM;
198 }
152 } 199 }
153 200
154 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 201 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -159,34 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
159 dev_priv->current_page = 0; 206 dev_priv->current_page = 0;
160 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 207 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
161 208
162 /* We are using separate values as placeholders for mechanisms for
163 * private backbuffer/depthbuffer usage.
164 */
165 dev_priv->use_mi_batchbuffer_start = 0;
166 if (IS_I965G(dev)) /* 965 doesn't support older method */
167 dev_priv->use_mi_batchbuffer_start = 1;
168
169 /* Allow hardware batchbuffers unless told otherwise. 209 /* Allow hardware batchbuffers unless told otherwise.
170 */ 210 */
171 dev_priv->allow_batchbuffer = 1; 211 dev_priv->allow_batchbuffer = 1;
172 212
173 /* Program Hardware Status Page */
174 if (!I915_NEED_GFX_HWS(dev)) {
175 dev_priv->status_page_dmah =
176 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
177
178 if (!dev_priv->status_page_dmah) {
179 i915_dma_cleanup(dev);
180 DRM_ERROR("Can not allocate hardware status page\n");
181 return -ENOMEM;
182 }
183 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
184 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
185
186 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
187 I915_WRITE(0x02080, dev_priv->dma_status_page);
188 }
189 DRM_DEBUG("Enabled hardware status page\n");
190 return 0; 213 return 0;
191} 214}
192 215
@@ -201,11 +224,6 @@ static int i915_dma_resume(struct drm_device * dev)
201 return -EINVAL; 224 return -EINVAL;
202 } 225 }
203 226
204 if (!dev_priv->mmio_map) {
205 DRM_ERROR("can not find mmio map!\n");
206 return -EINVAL;
207 }
208
209 if (dev_priv->ring.map.handle == NULL) { 227 if (dev_priv->ring.map.handle == NULL) {
210 DRM_ERROR("can not ioremap virtual address for" 228 DRM_ERROR("can not ioremap virtual address for"
211 " ring buffer\n"); 229 " ring buffer\n");
@@ -220,9 +238,9 @@ static int i915_dma_resume(struct drm_device * dev)
220 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 238 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
221 239
222 if (dev_priv->status_gfx_addr != 0) 240 if (dev_priv->status_gfx_addr != 0)
223 I915_WRITE(0x02080, dev_priv->status_gfx_addr); 241 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
224 else 242 else
225 I915_WRITE(0x02080, dev_priv->dma_status_page); 243 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
226 DRM_DEBUG("Enabled hardware status page\n"); 244 DRM_DEBUG("Enabled hardware status page\n");
227 245
228 return 0; 246 return 0;
@@ -367,9 +385,10 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
367 return 0; 385 return 0;
368} 386}
369 387
370static int i915_emit_box(struct drm_device * dev, 388int
371 struct drm_clip_rect __user * boxes, 389i915_emit_box(struct drm_device *dev,
372 int i, int DR1, int DR4) 390 struct drm_clip_rect __user *boxes,
391 int i, int DR1, int DR4)
373{ 392{
374 drm_i915_private_t *dev_priv = dev->dev_private; 393 drm_i915_private_t *dev_priv = dev->dev_private;
375 struct drm_clip_rect box; 394 struct drm_clip_rect box;
@@ -415,14 +434,15 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
415 drm_i915_private_t *dev_priv = dev->dev_private; 434 drm_i915_private_t *dev_priv = dev->dev_private;
416 RING_LOCALS; 435 RING_LOCALS;
417 436
418 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; 437 dev_priv->counter++;
419
420 if (dev_priv->counter > 0x7FFFFFFFUL) 438 if (dev_priv->counter > 0x7FFFFFFFUL)
421 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; 439 dev_priv->counter = 0;
440 if (dev_priv->sarea_priv)
441 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
422 442
423 BEGIN_LP_RING(4); 443 BEGIN_LP_RING(4);
424 OUT_RING(CMD_STORE_DWORD_IDX); 444 OUT_RING(MI_STORE_DWORD_INDEX);
425 OUT_RING(20); 445 OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
426 OUT_RING(dev_priv->counter); 446 OUT_RING(dev_priv->counter);
427 OUT_RING(0); 447 OUT_RING(0);
428 ADVANCE_LP_RING(); 448 ADVANCE_LP_RING();
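The breadcrumb also trades a magic byte offset for the symbolic form: MI_STORE_DWORD_INDEX tells the GPU to write dev_priv->counter into the hardware status page, and 5 << MI_STORE_DWORD_INDEX_SHIFT names dword slot 5 rather than hard-coding byte offset 20 (the shift resolves to 2, so both forms name the same slot). The CPU side then just reads that slot back; a sketch assuming this layout:

    static u32 read_breadcrumb(drm_i915_private_t *dev_priv)
    {
        volatile u32 *hws = (volatile u32 *)dev_priv->hw_status_page;

        return hws[5];  /* dword written by MI_STORE_DWORD_INDEX above */
    }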
@@ -486,7 +506,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
486 return ret; 506 return ret;
487 } 507 }
488 508
489 if (dev_priv->use_mi_batchbuffer_start) { 509 if (!IS_I830(dev) && !IS_845G(dev)) {
490 BEGIN_LP_RING(2); 510 BEGIN_LP_RING(2);
491 if (IS_I965G(dev)) { 511 if (IS_I965G(dev)) {
492 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); 512 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
@@ -516,6 +536,9 @@ static int i915_dispatch_flip(struct drm_device * dev)
516 drm_i915_private_t *dev_priv = dev->dev_private; 536 drm_i915_private_t *dev_priv = dev->dev_private;
517 RING_LOCALS; 537 RING_LOCALS;
518 538
539 if (!dev_priv->sarea_priv)
540 return -EINVAL;
541
519 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 542 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
520 __func__, 543 __func__,
521 dev_priv->current_page, 544 dev_priv->current_page,
@@ -524,7 +547,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
524 i915_kernel_lost_context(dev); 547 i915_kernel_lost_context(dev);
525 548
526 BEGIN_LP_RING(2); 549 BEGIN_LP_RING(2);
527 OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); 550 OUT_RING(MI_FLUSH | MI_READ_FLUSH);
528 OUT_RING(0); 551 OUT_RING(0);
529 ADVANCE_LP_RING(); 552 ADVANCE_LP_RING();
530 553
@@ -549,8 +572,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
549 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; 572 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
550 573
551 BEGIN_LP_RING(4); 574 BEGIN_LP_RING(4);
552 OUT_RING(CMD_STORE_DWORD_IDX); 575 OUT_RING(MI_STORE_DWORD_INDEX);
553 OUT_RING(20); 576 OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
554 OUT_RING(dev_priv->counter); 577 OUT_RING(dev_priv->counter);
555 OUT_RING(0); 578 OUT_RING(0);
556 ADVANCE_LP_RING(); 579 ADVANCE_LP_RING();
@@ -570,9 +593,15 @@ static int i915_quiescent(struct drm_device * dev)
570static int i915_flush_ioctl(struct drm_device *dev, void *data, 593static int i915_flush_ioctl(struct drm_device *dev, void *data,
571 struct drm_file *file_priv) 594 struct drm_file *file_priv)
572{ 595{
573 LOCK_TEST_WITH_RETURN(dev, file_priv); 596 int ret;
597
598 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
599
600 mutex_lock(&dev->struct_mutex);
601 ret = i915_quiescent(dev);
602 mutex_unlock(&dev->struct_mutex);
574 603
575 return i915_quiescent(dev); 604 return ret;
576} 605}
577 606
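The flush path, and the dispatch ioctls below, now bracket ring access with dev->struct_mutex: once GEM submits work from inside the kernel, the userspace heavyweight lock alone no longer serializes the ring (RING_LOCK_TEST_WITH_RETURN presumably relaxes the old lock check once GEM owns the ring). Condensed to its shape:

    static int guarded_dispatch(struct drm_device *dev,
                                int (*dispatch)(struct drm_device *))
    {
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = dispatch(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
    }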
578static int i915_batchbuffer(struct drm_device *dev, void *data, 607static int i915_batchbuffer(struct drm_device *dev, void *data,
@@ -593,16 +622,19 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
593 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", 622 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
594 batch->start, batch->used, batch->num_cliprects); 623 batch->start, batch->used, batch->num_cliprects);
595 624
596 LOCK_TEST_WITH_RETURN(dev, file_priv); 625 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
597 626
598 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, 627 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
599 batch->num_cliprects * 628 batch->num_cliprects *
600 sizeof(struct drm_clip_rect))) 629 sizeof(struct drm_clip_rect)))
601 return -EFAULT; 630 return -EFAULT;
602 631
632 mutex_lock(&dev->struct_mutex);
603 ret = i915_dispatch_batchbuffer(dev, batch); 633 ret = i915_dispatch_batchbuffer(dev, batch);
634 mutex_unlock(&dev->struct_mutex);
604 635
605 sarea_priv->last_dispatch = (int)hw_status[5]; 636 if (sarea_priv)
637 sarea_priv->last_dispatch = (int)hw_status[5];
606 return ret; 638 return ret;
607} 639}
608 640
@@ -619,7 +651,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
619 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 651 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
620 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 652 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
621 653
622 LOCK_TEST_WITH_RETURN(dev, file_priv); 654 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
623 655
624 if (cmdbuf->num_cliprects && 656 if (cmdbuf->num_cliprects &&
625 DRM_VERIFYAREA_READ(cmdbuf->cliprects, 657 DRM_VERIFYAREA_READ(cmdbuf->cliprects,
@@ -629,24 +661,33 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
629 return -EFAULT; 661 return -EFAULT;
630 } 662 }
631 663
664 mutex_lock(&dev->struct_mutex);
632 ret = i915_dispatch_cmdbuffer(dev, cmdbuf); 665 ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
666 mutex_unlock(&dev->struct_mutex);
633 if (ret) { 667 if (ret) {
634 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 668 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
635 return ret; 669 return ret;
636 } 670 }
637 671
638 sarea_priv->last_dispatch = (int)hw_status[5]; 672 if (sarea_priv)
673 sarea_priv->last_dispatch = (int)hw_status[5];
639 return 0; 674 return 0;
640} 675}
641 676
642static int i915_flip_bufs(struct drm_device *dev, void *data, 677static int i915_flip_bufs(struct drm_device *dev, void *data,
643 struct drm_file *file_priv) 678 struct drm_file *file_priv)
644{ 679{
680 int ret;
681
645 DRM_DEBUG("%s\n", __func__); 682 DRM_DEBUG("%s\n", __func__);
646 683
647 LOCK_TEST_WITH_RETURN(dev, file_priv); 684 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
685
686 mutex_lock(&dev->struct_mutex);
687 ret = i915_dispatch_flip(dev);
688 mutex_unlock(&dev->struct_mutex);
648 689
649 return i915_dispatch_flip(dev); 690 return ret;
650} 691}
651 692
652static int i915_getparam(struct drm_device *dev, void *data, 693static int i915_getparam(struct drm_device *dev, void *data,
@@ -663,7 +704,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
663 704
664 switch (param->param) { 705 switch (param->param) {
665 case I915_PARAM_IRQ_ACTIVE: 706 case I915_PARAM_IRQ_ACTIVE:
666 value = dev->irq ? 1 : 0; 707 value = dev->pdev->irq ? 1 : 0;
667 break; 708 break;
668 case I915_PARAM_ALLOW_BATCHBUFFER: 709 case I915_PARAM_ALLOW_BATCHBUFFER:
669 value = dev_priv->allow_batchbuffer ? 1 : 0; 710 value = dev_priv->allow_batchbuffer ? 1 : 0;
@@ -671,6 +712,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
671 case I915_PARAM_LAST_DISPATCH: 712 case I915_PARAM_LAST_DISPATCH:
672 value = READ_BREADCRUMB(dev_priv); 713 value = READ_BREADCRUMB(dev_priv);
673 break; 714 break;
715 case I915_PARAM_CHIPSET_ID:
716 value = dev->pci_device;
717 break;
718 case I915_PARAM_HAS_GEM:
719 value = 1;
720 break;
674 default: 721 default:
675 DRM_ERROR("Unknown parameter %d\n", param->param); 722 DRM_ERROR("Unknown parameter %d\n", param->param);
676 return -EINVAL; 723 return -EINVAL;
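The two new parameters let userspace identify the chip and detect GEM without playing version games. A userspace-side sketch of the probe (types and names per the i915_drm.h changes in this series; error handling trimmed):

    #include <xf86drm.h>
    #include <i915_drm.h>

    static int has_gem(int fd)
    {
        drm_i915_getparam_t gp;
        int value = 0;

        gp.param = I915_PARAM_HAS_GEM;
        gp.value = &value;
        if (drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp)))
            return 0;
        return value;
    }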
@@ -697,8 +744,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
697 744
698 switch (param->param) { 745 switch (param->param) {
699 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 746 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
700 if (!IS_I965G(dev))
701 dev_priv->use_mi_batchbuffer_start = param->value;
702 break; 747 break;
703 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 748 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
704 dev_priv->tex_lru_log_granularity = param->value; 749 dev_priv->tex_lru_log_granularity = param->value;
@@ -749,8 +794,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
749 dev_priv->hw_status_page = dev_priv->hws_map.handle; 794 dev_priv->hw_status_page = dev_priv->hws_map.handle;
750 795
751 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 796 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
752 I915_WRITE(0x02080, dev_priv->status_gfx_addr); 797 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
753 DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n", 798 DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
754 dev_priv->status_gfx_addr); 799 dev_priv->status_gfx_addr);
755 DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); 800 DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
756 return 0; 801 return 0;
@@ -776,14 +821,38 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
776 memset(dev_priv, 0, sizeof(drm_i915_private_t)); 821 memset(dev_priv, 0, sizeof(drm_i915_private_t));
777 822
778 dev->dev_private = (void *)dev_priv; 823 dev->dev_private = (void *)dev_priv;
824 dev_priv->dev = dev;
779 825
780 /* Add register map (needed for suspend/resume) */ 826 /* Add register map (needed for suspend/resume) */
781 base = drm_get_resource_start(dev, mmio_bar); 827 base = drm_get_resource_start(dev, mmio_bar);
782 size = drm_get_resource_len(dev, mmio_bar); 828 size = drm_get_resource_len(dev, mmio_bar);
783 829
784 ret = drm_addmap(dev, base, size, _DRM_REGISTERS, 830 dev_priv->regs = ioremap(base, size);
785 _DRM_KERNEL | _DRM_DRIVER, 831
786 &dev_priv->mmio_map); 832 i915_gem_load(dev);
833
834 /* Init HWS */
835 if (!I915_NEED_GFX_HWS(dev)) {
836 ret = i915_init_phys_hws(dev);
837 if (ret != 0)
838 return ret;
839 }
840
841 /* On the 945G/GM, the chipset reports the MSI capability on the
842 * integrated graphics even though the support isn't actually there
843 * according to the published specs. It doesn't appear to function
844 * correctly in testing on 945G.
845 * This may be a side effect of MSI having been made available for PEG
846 * and the registers being closely associated.
847 */
848 if (!IS_I945G(dev) && !IS_I945GM(dev))
849 if (pci_enable_msi(dev->pdev))
850 DRM_ERROR("failed to enable MSI\n");
851
852 intel_opregion_init(dev);
853
854 spin_lock_init(&dev_priv->user_irq_lock);
855
787 return ret; 856 return ret;
788} 857}
789 858
@@ -791,8 +860,15 @@ int i915_driver_unload(struct drm_device *dev)
791{ 860{
792 struct drm_i915_private *dev_priv = dev->dev_private; 861 struct drm_i915_private *dev_priv = dev->dev_private;
793 862
794 if (dev_priv->mmio_map) 863 if (dev->pdev->msi_enabled)
795 drm_rmmap(dev, dev_priv->mmio_map); 864 pci_disable_msi(dev->pdev);
865
866 i915_free_hws(dev);
867
868 if (dev_priv->regs != NULL)
869 iounmap(dev_priv->regs);
870
871 intel_opregion_free(dev);
796 872
797 drm_free(dev->dev_private, sizeof(drm_i915_private_t), 873 drm_free(dev->dev_private, sizeof(drm_i915_private_t),
798 DRM_MEM_DRIVER); 874 DRM_MEM_DRIVER);
@@ -800,6 +876,25 @@ int i915_driver_unload(struct drm_device *dev)
800 return 0; 876 return 0;
801} 877}
802 878
879int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
880{
881 struct drm_i915_file_private *i915_file_priv;
882
883 DRM_DEBUG("\n");
884 i915_file_priv = (struct drm_i915_file_private *)
885 drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
886
887 if (!i915_file_priv)
888 return -ENOMEM;
889
890 file_priv->driver_priv = i915_file_priv;
891
892 i915_file_priv->mm.last_gem_seqno = 0;
893 i915_file_priv->mm.last_gem_throttle_seqno = 0;
894
895 return 0;
896}
897
803void i915_driver_lastclose(struct drm_device * dev) 898void i915_driver_lastclose(struct drm_device * dev)
804{ 899{
805 drm_i915_private_t *dev_priv = dev->dev_private; 900 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -807,6 +902,8 @@ void i915_driver_lastclose(struct drm_device * dev)
807 if (!dev_priv) 902 if (!dev_priv)
808 return; 903 return;
809 904
905 i915_gem_lastclose(dev);
906
810 if (dev_priv->agp_heap) 907 if (dev_priv->agp_heap)
811 i915_mem_takedown(&(dev_priv->agp_heap)); 908 i915_mem_takedown(&(dev_priv->agp_heap));
812 909
@@ -819,6 +916,13 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
819 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 916 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
820} 917}
821 918
919void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
920{
921 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
922
923 drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
924}
925
822struct drm_ioctl_desc i915_ioctls[] = { 926struct drm_ioctl_desc i915_ioctls[] = {
823 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 927 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
824 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 928 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
@@ -836,7 +940,23 @@ struct drm_ioctl_desc i915_ioctls[] = {
836 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), 940 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
837 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), 941 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
838 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 942 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
839 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), 943 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
944 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
945 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
946 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
947 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
948 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
949 DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
950 DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
951 DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
952 DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
953 DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
954 DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
955 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
956 DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
957 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
958 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
959 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
840}; 960};
841 961
842int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 962int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
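Sixteen GEM ioctls join the table; note that create/pread/pwrite/mmap carry no DRM_AUTH flag, presumably because handles are per-file-descriptor and confer no cross-client access. A userspace-side sketch of the simplest entry point (struct layout per the include/drm/i915_drm.h additions; the kernel rounds the size up to a page multiple):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    static int gem_create(int fd, uint64_t size, uint32_t *handle)
    {
        struct drm_i915_gem_create create;

        memset(&create, 0, sizeof(create));
        create.size = size;
        if (drmCommandWriteRead(fd, DRM_I915_GEM_CREATE,
                                &create, sizeof(create)))
            return -1;

        *handle = create.handle;    /* valid only on this fd */
        return 0;
    }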
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 93aed1c38bd2..a80ead215282 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -38,211 +38,9 @@ static struct pci_device_id pciidlist[] = {
38 i915_PCI_IDS 38 i915_PCI_IDS
39}; 39};
40 40
41enum pipe {
42 PIPE_A = 0,
43 PIPE_B,
44};
45
46static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
47{
48 struct drm_i915_private *dev_priv = dev->dev_private;
49
50 if (pipe == PIPE_A)
51 return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
52 else
53 return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
54}
55
56static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
57{
58 struct drm_i915_private *dev_priv = dev->dev_private;
59 unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
60 u32 *array;
61 int i;
62
63 if (!i915_pipe_enabled(dev, pipe))
64 return;
65
66 if (pipe == PIPE_A)
67 array = dev_priv->save_palette_a;
68 else
69 array = dev_priv->save_palette_b;
70
71 for(i = 0; i < 256; i++)
72 array[i] = I915_READ(reg + (i << 2));
73}
74
75static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
76{
77 struct drm_i915_private *dev_priv = dev->dev_private;
78 unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
79 u32 *array;
80 int i;
81
82 if (!i915_pipe_enabled(dev, pipe))
83 return;
84
85 if (pipe == PIPE_A)
86 array = dev_priv->save_palette_a;
87 else
88 array = dev_priv->save_palette_b;
89
90 for(i = 0; i < 256; i++)
91 I915_WRITE(reg + (i << 2), array[i]);
92}
93
94static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
95{
96 outb(reg, index_port);
97 return inb(data_port);
98}
99
100static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
101{
102 inb(st01);
103 outb(palette_enable | reg, VGA_AR_INDEX);
104 return inb(VGA_AR_DATA_READ);
105}
106
107static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
108{
109 inb(st01);
110 outb(palette_enable | reg, VGA_AR_INDEX);
111 outb(val, VGA_AR_DATA_WRITE);
112}
113
114static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
115{
116 outb(reg, index_port);
117 outb(val, data_port);
118}
119
120static void i915_save_vga(struct drm_device *dev)
121{
122 struct drm_i915_private *dev_priv = dev->dev_private;
123 int i;
124 u16 cr_index, cr_data, st01;
125
126 /* VGA color palette registers */
127 dev_priv->saveDACMASK = inb(VGA_DACMASK);
128 /* DACCRX automatically increments during read */
129 outb(0, VGA_DACRX);
130 /* Read 3 bytes of color data from each index */
131 for (i = 0; i < 256 * 3; i++)
132 dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
133
134 /* MSR bits */
135 dev_priv->saveMSR = inb(VGA_MSR_READ);
136 if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
137 cr_index = VGA_CR_INDEX_CGA;
138 cr_data = VGA_CR_DATA_CGA;
139 st01 = VGA_ST01_CGA;
140 } else {
141 cr_index = VGA_CR_INDEX_MDA;
142 cr_data = VGA_CR_DATA_MDA;
143 st01 = VGA_ST01_MDA;
144 }
145
146 /* CRT controller regs */
147 i915_write_indexed(cr_index, cr_data, 0x11,
148 i915_read_indexed(cr_index, cr_data, 0x11) &
149 (~0x80));
150 for (i = 0; i <= 0x24; i++)
151 dev_priv->saveCR[i] =
152 i915_read_indexed(cr_index, cr_data, i);
153 /* Make sure we don't turn off CR group 0 writes */
154 dev_priv->saveCR[0x11] &= ~0x80;
155
156 /* Attribute controller registers */
157 inb(st01);
158 dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
159 for (i = 0; i <= 0x14; i++)
160 dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
161 inb(st01);
162 outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
163 inb(st01);
164
165 /* Graphics controller registers */
166 for (i = 0; i < 9; i++)
167 dev_priv->saveGR[i] =
168 i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
169
170 dev_priv->saveGR[0x10] =
171 i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
172 dev_priv->saveGR[0x11] =
173 i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
174 dev_priv->saveGR[0x18] =
175 i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
176
177 /* Sequencer registers */
178 for (i = 0; i < 8; i++)
179 dev_priv->saveSR[i] =
180 i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
181}
182
183static void i915_restore_vga(struct drm_device *dev)
184{
185 struct drm_i915_private *dev_priv = dev->dev_private;
186 int i;
187 u16 cr_index, cr_data, st01;
188
189 /* MSR bits */
190 outb(dev_priv->saveMSR, VGA_MSR_WRITE);
191 if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
192 cr_index = VGA_CR_INDEX_CGA;
193 cr_data = VGA_CR_DATA_CGA;
194 st01 = VGA_ST01_CGA;
195 } else {
196 cr_index = VGA_CR_INDEX_MDA;
197 cr_data = VGA_CR_DATA_MDA;
198 st01 = VGA_ST01_MDA;
199 }
200
201 /* Sequencer registers, don't write SR07 */
202 for (i = 0; i < 7; i++)
203 i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
204 dev_priv->saveSR[i]);
205
206 /* CRT controller regs */
207 /* Enable CR group 0 writes */
208 i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
209 for (i = 0; i <= 0x24; i++)
210 i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
211
212 /* Graphics controller regs */
213 for (i = 0; i < 9; i++)
214 i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
215 dev_priv->saveGR[i]);
216
217 i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
218 dev_priv->saveGR[0x10]);
219 i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
220 dev_priv->saveGR[0x11]);
221 i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
222 dev_priv->saveGR[0x18]);
223
224 /* Attribute controller registers */
225 inb(st01);
226 for (i = 0; i <= 0x14; i++)
227 i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
228 inb(st01); /* switch back to index mode */
229 outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
230 inb(st01);
231
232 /* VGA color palette registers */
233 outb(dev_priv->saveDACMASK, VGA_DACMASK);
234 /* DACCRX automatically increments during read */
235 outb(0, VGA_DACWX);
236 /* Read 3 bytes of color data from each index */
237 for (i = 0; i < 256 * 3; i++)
238 outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
239
240}
241
242static int i915_suspend(struct drm_device *dev, pm_message_t state) 41static int i915_suspend(struct drm_device *dev, pm_message_t state)
243{ 42{
244 struct drm_i915_private *dev_priv = dev->dev_private; 43 struct drm_i915_private *dev_priv = dev->dev_private;
245 int i;
246 44
247 if (!dev || !dev_priv) { 45 if (!dev || !dev_priv) {
248 printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); 46 printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
@@ -254,122 +52,10 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
254 return 0; 52 return 0;
255 53
256 pci_save_state(dev->pdev); 54 pci_save_state(dev->pdev);
257 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
258
259 /* Display arbitration control */
260 dev_priv->saveDSPARB = I915_READ(DSPARB);
261
262 /* Pipe & plane A info */
263 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
264 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
265 dev_priv->saveFPA0 = I915_READ(FPA0);
266 dev_priv->saveFPA1 = I915_READ(FPA1);
267 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
268 if (IS_I965G(dev))
269 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
270 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
271 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
272 dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
273 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
274 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
275 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
276 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
277
278 dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
279 dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
280 dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
281 dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
282 dev_priv->saveDSPABASE = I915_READ(DSPABASE);
283 if (IS_I965G(dev)) {
284 dev_priv->saveDSPASURF = I915_READ(DSPASURF);
285 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
286 }
287 i915_save_palette(dev, PIPE_A);
288 dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
289
290 /* Pipe & plane B info */
291 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
292 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
293 dev_priv->saveFPB0 = I915_READ(FPB0);
294 dev_priv->saveFPB1 = I915_READ(FPB1);
295 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
296 if (IS_I965G(dev))
297 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
298 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
299 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
300 dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
301 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
302 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
303 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
304 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
305
306 dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
307 dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
308 dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
309 dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
310 dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
311 if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
312 dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
313 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
314 }
315 i915_save_palette(dev, PIPE_B);
316 dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
317 55
318 /* CRT state */ 56 i915_save_state(dev);
319 dev_priv->saveADPA = I915_READ(ADPA);
320 57
321 /* LVDS state */ 58 intel_opregion_free(dev);
322 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
323 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
324 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
325 if (IS_I965G(dev))
326 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
327 if (IS_MOBILE(dev) && !IS_I830(dev))
328 dev_priv->saveLVDS = I915_READ(LVDS);
329 if (!IS_I830(dev) && !IS_845G(dev))
330 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
331 dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
332 dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
333 dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
334
335 /* FIXME: save TV & SDVO state */
336
337 /* FBC state */
338 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
339 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
340 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
341 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
342
343 /* Interrupt state */
344 dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
345 dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
346 dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
347
348 /* VGA state */
349 dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
350 dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
351 dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
352 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
353
354 /* Clock gating state */
355 dev_priv->saveD_STATE = I915_READ(D_STATE);
356 dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
357
358 /* Cache mode state */
359 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
360
361 /* Memory Arbitration state */
362 dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
363
364 /* Scratch space */
365 for (i = 0; i < 16; i++) {
366 dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
367 dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
368 }
369 for (i = 0; i < 3; i++)
370 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
371
372 i915_save_vga(dev);
373 59
374 if (state.event == PM_EVENT_SUSPEND) { 60 if (state.event == PM_EVENT_SUSPEND) {
375 /* Shut down the device */ 61 /* Shut down the device */
@@ -382,155 +68,15 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
382 68
383static int i915_resume(struct drm_device *dev) 69static int i915_resume(struct drm_device *dev)
384{ 70{
385 struct drm_i915_private *dev_priv = dev->dev_private;
386 int i;
387
388 pci_set_power_state(dev->pdev, PCI_D0); 71 pci_set_power_state(dev->pdev, PCI_D0);
389 pci_restore_state(dev->pdev); 72 pci_restore_state(dev->pdev);
390 if (pci_enable_device(dev->pdev)) 73 if (pci_enable_device(dev->pdev))
391 return -1; 74 return -1;
392 pci_set_master(dev->pdev); 75 pci_set_master(dev->pdev);
393 76
394 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 77 i915_restore_state(dev);
395
396 I915_WRITE(DSPARB, dev_priv->saveDSPARB);
397
398 /* Pipe & plane A info */
399 /* Prime the clock */
400 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
401 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
402 ~DPLL_VCO_ENABLE);
403 udelay(150);
404 }
405 I915_WRITE(FPA0, dev_priv->saveFPA0);
406 I915_WRITE(FPA1, dev_priv->saveFPA1);
407 /* Actually enable it */
408 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
409 udelay(150);
410 if (IS_I965G(dev))
411 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
412 udelay(150);
413
414 /* Restore mode */
415 I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
416 I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
417 I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
418 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
419 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
420 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
421 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
422
423 /* Restore plane info */
424 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
425 I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
426 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
427 I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
428 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
429 if (IS_I965G(dev)) {
430 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
431 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
432 }
433
434 I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
435
436 i915_restore_palette(dev, PIPE_A);
437 /* Enable the plane */
438 I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
439 I915_WRITE(DSPABASE, I915_READ(DSPABASE));
440
441 /* Pipe & plane B info */
442 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
443 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
444 ~DPLL_VCO_ENABLE);
445 udelay(150);
446 }
447 I915_WRITE(FPB0, dev_priv->saveFPB0);
448 I915_WRITE(FPB1, dev_priv->saveFPB1);
449 /* Actually enable it */
450 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
451 udelay(150);
452 if (IS_I965G(dev))
453 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
454 udelay(150);
455
456 /* Restore mode */
457 I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
458 I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
459 I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
460 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
461 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
462 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
463 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
464
465 /* Restore plane info */
466 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
467 I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
468 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
469 I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
470 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
471 if (IS_I965G(dev)) {
472 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
473 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
474 }
475
476 I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
477
478 i915_restore_palette(dev, PIPE_B);
479 /* Enable the plane */
480 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
481 I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
482
483 /* CRT state */
484 I915_WRITE(ADPA, dev_priv->saveADPA);
485
486 /* LVDS state */
487 if (IS_I965G(dev))
488 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
489 if (IS_MOBILE(dev) && !IS_I830(dev))
490 I915_WRITE(LVDS, dev_priv->saveLVDS);
491 if (!IS_I830(dev) && !IS_845G(dev))
492 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
493
494 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
495 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
496 I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
497 I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
498 I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
499 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
500
501 /* FIXME: restore TV & SDVO state */
502
503 /* FBC info */
504 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
505 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
506 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
507 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
508
509 /* VGA state */
510 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
511 I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
512 I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
513 I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
514 udelay(150);
515
516 /* Clock gating state */
517 I915_WRITE (D_STATE, dev_priv->saveD_STATE);
518 I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
519
520 /* Cache mode state */
521 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
522
523 /* Memory arbitration state */
524 I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
525
526 for (i = 0; i < 16; i++) {
527 I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
528 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
529 }
530 for (i = 0; i < 3; i++)
531 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
532 78
533 i915_restore_vga(dev); 79 intel_opregion_init(dev);
534 80
535 return 0; 81 return 0;
536} 82}
@@ -541,17 +87,19 @@ static struct drm_driver driver = {
541 */ 87 */
542 .driver_features = 88 .driver_features =
543 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 89 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
544 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL | 90 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
545 DRIVER_IRQ_VBL2,
546 .load = i915_driver_load, 91 .load = i915_driver_load,
547 .unload = i915_driver_unload, 92 .unload = i915_driver_unload,
93 .open = i915_driver_open,
548 .lastclose = i915_driver_lastclose, 94 .lastclose = i915_driver_lastclose,
549 .preclose = i915_driver_preclose, 95 .preclose = i915_driver_preclose,
96 .postclose = i915_driver_postclose,
550 .suspend = i915_suspend, 97 .suspend = i915_suspend,
551 .resume = i915_resume, 98 .resume = i915_resume,
552 .device_is_agp = i915_driver_device_is_agp, 99 .device_is_agp = i915_driver_device_is_agp,
553 .vblank_wait = i915_driver_vblank_wait, 100 .get_vblank_counter = i915_get_vblank_counter,
554 .vblank_wait2 = i915_driver_vblank_wait2, 101 .enable_vblank = i915_enable_vblank,
102 .disable_vblank = i915_disable_vblank,
555 .irq_preinstall = i915_driver_irq_preinstall, 103 .irq_preinstall = i915_driver_irq_preinstall,
556 .irq_postinstall = i915_driver_irq_postinstall, 104 .irq_postinstall = i915_driver_irq_postinstall,
557 .irq_uninstall = i915_driver_irq_uninstall, 105 .irq_uninstall = i915_driver_irq_uninstall,
@@ -559,6 +107,10 @@ static struct drm_driver driver = {
559 .reclaim_buffers = drm_core_reclaim_buffers, 107 .reclaim_buffers = drm_core_reclaim_buffers,
560 .get_map_ofs = drm_core_get_map_ofs, 108 .get_map_ofs = drm_core_get_map_ofs,
561 .get_reg_ofs = drm_core_get_reg_ofs, 109 .get_reg_ofs = drm_core_get_reg_ofs,
110 .proc_init = i915_gem_proc_init,
111 .proc_cleanup = i915_gem_proc_cleanup,
112 .gem_init_object = i915_gem_init_object,
113 .gem_free_object = i915_gem_free_object,
562 .ioctls = i915_ioctls, 114 .ioctls = i915_ioctls,
563 .fops = { 115 .fops = {
564 .owner = THIS_MODULE, 116 .owner = THIS_MODULE,
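With DRIVER_IRQ_VBL and DRIVER_IRQ_VBL2 gone, vblank support moves from driver-owned wait loops to three core-driven callbacks: the core refcounts waiters and calls enable/disable as they come and go, reading the counter through the driver. Their shapes as wired up here (per-pipe crtc parameter; signatures as this series declares them):

    u32  i915_get_vblank_counter(struct drm_device *dev, int crtc);
    int  i915_enable_vblank(struct drm_device *dev, int crtc);
    void i915_disable_vblank(struct drm_device *dev, int crtc);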
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d7326d92a237..eae4ed3956e0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -30,6 +30,8 @@
30#ifndef _I915_DRV_H_ 30#ifndef _I915_DRV_H_
31#define _I915_DRV_H_ 31#define _I915_DRV_H_
32 32
33#include "i915_reg.h"
34
33/* General customization: 35/* General customization:
34 */ 36 */
35 37
@@ -37,7 +39,12 @@
37 39
38#define DRIVER_NAME "i915" 40#define DRIVER_NAME "i915"
39#define DRIVER_DESC "Intel Graphics" 41#define DRIVER_DESC "Intel Graphics"
40#define DRIVER_DATE "20060119" 42#define DRIVER_DATE "20080730"
43
44enum pipe {
45 PIPE_A = 0,
46 PIPE_B,
47};
41 48
42/* Interface history: 49/* Interface history:
43 * 50 *
@@ -53,16 +60,23 @@
53#define DRIVER_MINOR 6 60#define DRIVER_MINOR 6
54#define DRIVER_PATCHLEVEL 0 61#define DRIVER_PATCHLEVEL 0
55 62
63#define WATCH_COHERENCY 0
64#define WATCH_BUF 0
65#define WATCH_EXEC 0
66#define WATCH_LRU 0
67#define WATCH_RELOC 0
68#define WATCH_INACTIVE 0
69#define WATCH_PWRITE 0
70
56typedef struct _drm_i915_ring_buffer { 71typedef struct _drm_i915_ring_buffer {
57 int tail_mask; 72 int tail_mask;
58 unsigned long Start;
59 unsigned long End;
60 unsigned long Size; 73 unsigned long Size;
61 u8 *virtual_start; 74 u8 *virtual_start;
62 int head; 75 int head;
63 int tail; 76 int tail;
64 int space; 77 int space;
65 drm_local_map_t map; 78 drm_local_map_t map;
79 struct drm_gem_object *ring_obj;
66} drm_i915_ring_buffer_t; 80} drm_i915_ring_buffer_t;
67 81
68struct mem_block { 82struct mem_block {
@@ -76,13 +90,28 @@ struct mem_block {
76typedef struct _drm_i915_vbl_swap { 90typedef struct _drm_i915_vbl_swap {
77 struct list_head head; 91 struct list_head head;
78 drm_drawable_t drw_id; 92 drm_drawable_t drw_id;
79 unsigned int pipe; 93 unsigned int plane;
80 unsigned int sequence; 94 unsigned int sequence;
81} drm_i915_vbl_swap_t; 95} drm_i915_vbl_swap_t;
82 96
97struct opregion_header;
98struct opregion_acpi;
99struct opregion_swsci;
100struct opregion_asle;
101
102struct intel_opregion {
103 struct opregion_header *header;
104 struct opregion_acpi *acpi;
105 struct opregion_swsci *swsci;
106 struct opregion_asle *asle;
107 int enabled;
108};
109
83typedef struct drm_i915_private { 110typedef struct drm_i915_private {
111 struct drm_device *dev;
112
113 void __iomem *regs;
84 drm_local_map_t *sarea; 114 drm_local_map_t *sarea;
85 drm_local_map_t *mmio_map;
86 115
87 drm_i915_sarea_t *sarea_priv; 116 drm_i915_sarea_t *sarea_priv;
88 drm_i915_ring_buffer_t ring; 117 drm_i915_ring_buffer_t ring;
@@ -90,20 +119,25 @@ typedef struct drm_i915_private {
90 drm_dma_handle_t *status_page_dmah; 119 drm_dma_handle_t *status_page_dmah;
91 void *hw_status_page; 120 void *hw_status_page;
92 dma_addr_t dma_status_page; 121 dma_addr_t dma_status_page;
93 unsigned long counter; 122 uint32_t counter;
94 unsigned int status_gfx_addr; 123 unsigned int status_gfx_addr;
95 drm_local_map_t hws_map; 124 drm_local_map_t hws_map;
125 struct drm_gem_object *hws_obj;
96 126
97 unsigned int cpp; 127 unsigned int cpp;
98 int back_offset; 128 int back_offset;
99 int front_offset; 129 int front_offset;
100 int current_page; 130 int current_page;
101 int page_flipping; 131 int page_flipping;
102 int use_mi_batchbuffer_start;
103 132
104 wait_queue_head_t irq_queue; 133 wait_queue_head_t irq_queue;
105 atomic_t irq_received; 134 atomic_t irq_received;
106 atomic_t irq_emitted; 135 /** Protects user_irq_refcount and irq_mask_reg */
136 spinlock_t user_irq_lock;
137 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
138 int user_irq_refcount;
139 /** Cached value of IMR to avoid reads in updating the bitfield */
140 u32 irq_mask_reg;
107 141
108 int tex_lru_log_granularity; 142 int tex_lru_log_granularity;
109 int allow_batchbuffer; 143 int allow_batchbuffer;
@@ -115,6 +149,8 @@ typedef struct drm_i915_private {
115 drm_i915_vbl_swap_t vbl_swaps; 149 drm_i915_vbl_swap_t vbl_swaps;
116 unsigned int swaps_pending; 150 unsigned int swaps_pending;
117 151
152 struct intel_opregion opregion;
153
118 /* Register state */ 154 /* Register state */
119 u8 saveLBB; 155 u8 saveLBB;
120 u32 saveDSPACNTR; 156 u32 saveDSPACNTR;
@@ -139,7 +175,7 @@ typedef struct drm_i915_private {
139 u32 saveDSPASTRIDE; 175 u32 saveDSPASTRIDE;
140 u32 saveDSPASIZE; 176 u32 saveDSPASIZE;
141 u32 saveDSPAPOS; 177 u32 saveDSPAPOS;
142 u32 saveDSPABASE; 178 u32 saveDSPAADDR;
143 u32 saveDSPASURF; 179 u32 saveDSPASURF;
144 u32 saveDSPATILEOFF; 180 u32 saveDSPATILEOFF;
145 u32 savePFIT_PGM_RATIOS; 181 u32 savePFIT_PGM_RATIOS;
@@ -160,24 +196,24 @@ typedef struct drm_i915_private {
160 u32 saveDSPBSTRIDE; 196 u32 saveDSPBSTRIDE;
161 u32 saveDSPBSIZE; 197 u32 saveDSPBSIZE;
162 u32 saveDSPBPOS; 198 u32 saveDSPBPOS;
163 u32 saveDSPBBASE; 199 u32 saveDSPBADDR;
164 u32 saveDSPBSURF; 200 u32 saveDSPBSURF;
165 u32 saveDSPBTILEOFF; 201 u32 saveDSPBTILEOFF;
166 u32 saveVCLK_DIVISOR_VGA0; 202 u32 saveVGA0;
167 u32 saveVCLK_DIVISOR_VGA1; 203 u32 saveVGA1;
168 u32 saveVCLK_POST_DIV; 204 u32 saveVGA_PD;
169 u32 saveVGACNTRL; 205 u32 saveVGACNTRL;
170 u32 saveADPA; 206 u32 saveADPA;
171 u32 saveLVDS; 207 u32 saveLVDS;
172 u32 saveLVDSPP_ON; 208 u32 savePP_ON_DELAYS;
173 u32 saveLVDSPP_OFF; 209 u32 savePP_OFF_DELAYS;
174 u32 saveDVOA; 210 u32 saveDVOA;
175 u32 saveDVOB; 211 u32 saveDVOB;
176 u32 saveDVOC; 212 u32 saveDVOC;
177 u32 savePP_ON; 213 u32 savePP_ON;
178 u32 savePP_OFF; 214 u32 savePP_OFF;
179 u32 savePP_CONTROL; 215 u32 savePP_CONTROL;
180 u32 savePP_CYCLE; 216 u32 savePP_DIVISOR;
181 u32 savePFIT_CONTROL; 217 u32 savePFIT_CONTROL;
182 u32 save_palette_a[256]; 218 u32 save_palette_a[256];
183 u32 save_palette_b[256]; 219 u32 save_palette_b[256];
@@ -190,7 +226,7 @@ typedef struct drm_i915_private {
190 u32 saveIMR; 226 u32 saveIMR;
191 u32 saveCACHE_MODE_0; 227 u32 saveCACHE_MODE_0;
192 u32 saveD_STATE; 228 u32 saveD_STATE;
193 u32 saveDSPCLK_GATE_D; 229 u32 saveCG_2D_DIS;
194 u32 saveMI_ARB_STATE; 230 u32 saveMI_ARB_STATE;
195 u32 saveSWF0[16]; 231 u32 saveSWF0[16];
196 u32 saveSWF1[16]; 232 u32 saveSWF1[16];
@@ -203,8 +239,180 @@ typedef struct drm_i915_private {
203 u8 saveDACMASK; 239 u8 saveDACMASK;
204 u8 saveDACDATA[256*3]; /* 256 3-byte colors */ 240 u8 saveDACDATA[256*3]; /* 256 3-byte colors */
205 u8 saveCR[37]; 241 u8 saveCR[37];
242
243 struct {
244 struct drm_mm gtt_space;
245
246 /**
247 * List of objects currently involved in rendering from the
248 * ringbuffer.
249 *
250 * A reference is held on the buffer while on this list.
251 */
252 struct list_head active_list;
253
254 /**
255 * List of objects which are not in the ringbuffer but which
256 * still have a write_domain which needs to be flushed before
257 * unbinding.
258 *
259 * A reference is held on the buffer while on this list.
260 */
261 struct list_head flushing_list;
262
263 /**
264 * LRU list of objects which are not in the ringbuffer and
265 * are ready to unbind, but are still in the GTT.
266 *
267 * A reference is not held on the buffer while on this list,
268 * as merely being GTT-bound shouldn't prevent its being
269 * freed, and we'll pull it off the list in the free path.
270 */
271 struct list_head inactive_list;
272
273 /**
274 * List of breadcrumbs associated with GPU requests currently
275 * outstanding.
276 */
277 struct list_head request_list;
278
279 /**
280 * We leave the user IRQ off as much as possible,
281 * but this means that requests will finish and never
282 * be retired once the system goes idle. Set a timer to
283 * fire periodically while the ring is running. When it
284 * fires, go retire requests.
285 */
286 struct delayed_work retire_work;
287
288 /** Work task for vblank-related ring access */
289 struct work_struct vblank_work;
290
291 uint32_t next_gem_seqno;
292
293 /**
294 * Waiting sequence number, if any
295 */
296 uint32_t waiting_gem_seqno;
297
298 /**
299 * Last seq seen at irq time
300 */
301 uint32_t irq_gem_seqno;
302
303 /**
304 * Flag if the X Server, and thus DRM, is not currently in
305 * control of the device.
306 *
307 * This is set between LeaveVT and EnterVT. It needs to be
308 * replaced with a semaphore. It also needs to be
309 * transitioned away from for kernel modesetting.
310 */
311 int suspended;
312
313 /**
314 * Flag if the hardware appears to be wedged.
315 *
 316 * This is set when attempts to idle the device time out.
 317 * It prevents command submission from occurring and makes
 318 * every pending request fail.
319 */
320 int wedged;
321
322 /** Bit 6 swizzling required for X tiling */
323 uint32_t bit_6_swizzle_x;
324 /** Bit 6 swizzling required for Y tiling */
325 uint32_t bit_6_swizzle_y;
326 } mm;
206} drm_i915_private_t; 327} drm_i915_private_t;
207 328
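The three mm lists encode an object lifecycle: active (the GPU may still touch it), flushing (idle but with unflushed write domains), inactive (bound and evictable), with a reference held for the first two only. A condensed sketch of the transitions (helper names illustrative, not this series' actual functions):

    static void object_becomes_active(struct drm_i915_gem_object *obj_priv,
                                      drm_i915_private_t *dev_priv,
                                      uint32_t seqno)
    {
        if (!obj_priv->active) {
            drm_gem_object_reference(obj_priv->obj); /* lists hold a ref */
            obj_priv->active = 1;
        }
        obj_priv->last_rendering_seqno = seqno;
        list_move_tail(&obj_priv->list, &dev_priv->mm.active_list);
    }

    static void object_retired(struct drm_i915_gem_object *obj_priv,
                               drm_i915_private_t *dev_priv)
    {
        /* the inactive list does NOT hold a reference (see above) */
        list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
        if (obj_priv->active) {
            obj_priv->active = 0;
            drm_gem_object_unreference(obj_priv->obj);
        }
    }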
329/** driver private structure attached to each drm_gem_object */
330struct drm_i915_gem_object {
331 struct drm_gem_object *obj;
332
333 /** Current space allocated to this object in the GTT, if any. */
334 struct drm_mm_node *gtt_space;
335
336 /** This object's place on the active/flushing/inactive lists */
337 struct list_head list;
338
339 /**
340 * This is set if the object is on the active or flushing lists
341 * (has pending rendering), and is not set if it's on inactive (ready
342 * to be unbound).
343 */
344 int active;
345
346 /**
347 * This is set if the object has been written to since last bound
348 * to the GTT
349 */
350 int dirty;
351
352 /** AGP memory structure for our GTT binding. */
353 DRM_AGP_MEM *agp_mem;
354
355 struct page **page_list;
356
357 /**
358 * Current offset of the object in GTT space.
359 *
360 * This is the same as gtt_space->start
361 */
362 uint32_t gtt_offset;
363
364 /** Boolean whether this object has a valid gtt offset. */
365 int gtt_bound;
366
367 /** How many users have pinned this object in GTT space */
368 int pin_count;
369
370 /** Breadcrumb of last rendering to the buffer. */
371 uint32_t last_rendering_seqno;
372
373 /** Current tiling mode for the object. */
374 uint32_t tiling_mode;
375
376 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */
377 uint32_t agp_type;
378
379 /**
380 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
381 * GEM_DOMAIN_CPU is not in the object's read domain.
382 */
383 uint8_t *page_cpu_valid;
384};
385
386/**
387 * Request queue structure.
388 *
389 * The request queue allows us to note sequence numbers that have been emitted
390 * and may be associated with active buffers to be retired.
391 *
392 * By keeping this list, we can avoid having to do questionable
393 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
394 * an emission time with seqnos for tracking how far ahead of the GPU we are.
395 */
396struct drm_i915_gem_request {
397 /** GEM sequence number associated with this request. */
398 uint32_t seqno;
399
400 /** Time at which this request was emitted, in jiffies. */
401 unsigned long emitted_jiffies;
402
403 /** Cache domains that were flushed at the start of the request. */
404 uint32_t flush_domains;
405
406 struct list_head list;
407};
408
409struct drm_i915_file_private {
410 struct {
411 uint32_t last_gem_seqno;
412 uint32_t last_gem_throttle_seqno;
413 } mm;
414};
415
208extern struct drm_ioctl_desc i915_ioctls[]; 416extern struct drm_ioctl_desc i915_ioctls[];
209extern int i915_max_ioctl; 417extern int i915_max_ioctl;
210 418
@@ -212,31 +420,42 @@ extern int i915_max_ioctl;
212extern void i915_kernel_lost_context(struct drm_device * dev); 420extern void i915_kernel_lost_context(struct drm_device * dev);
213extern int i915_driver_load(struct drm_device *, unsigned long flags); 421extern int i915_driver_load(struct drm_device *, unsigned long flags);
214extern int i915_driver_unload(struct drm_device *); 422extern int i915_driver_unload(struct drm_device *);
423extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
215extern void i915_driver_lastclose(struct drm_device * dev); 424extern void i915_driver_lastclose(struct drm_device * dev);
216extern void i915_driver_preclose(struct drm_device *dev, 425extern void i915_driver_preclose(struct drm_device *dev,
217 struct drm_file *file_priv); 426 struct drm_file *file_priv);
427extern void i915_driver_postclose(struct drm_device *dev,
428 struct drm_file *file_priv);
218extern int i915_driver_device_is_agp(struct drm_device * dev); 429extern int i915_driver_device_is_agp(struct drm_device * dev);
219extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 430extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
220 unsigned long arg); 431 unsigned long arg);
432extern int i915_emit_box(struct drm_device *dev,
433 struct drm_clip_rect __user *boxes,
434 int i, int DR1, int DR4);
221 435
222/* i915_irq.c */ 436/* i915_irq.c */
223extern int i915_irq_emit(struct drm_device *dev, void *data, 437extern int i915_irq_emit(struct drm_device *dev, void *data,
224 struct drm_file *file_priv); 438 struct drm_file *file_priv);
225extern int i915_irq_wait(struct drm_device *dev, void *data, 439extern int i915_irq_wait(struct drm_device *dev, void *data,
226 struct drm_file *file_priv); 440 struct drm_file *file_priv);
441void i915_user_irq_get(struct drm_device *dev);
442void i915_user_irq_put(struct drm_device *dev);
227 443
228extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); 444extern void i915_gem_vblank_work_handler(struct work_struct *work);
229extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
230extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 445extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
231extern void i915_driver_irq_preinstall(struct drm_device * dev); 446extern void i915_driver_irq_preinstall(struct drm_device * dev);
232extern void i915_driver_irq_postinstall(struct drm_device * dev); 447extern int i915_driver_irq_postinstall(struct drm_device *dev);
233extern void i915_driver_irq_uninstall(struct drm_device * dev); 448extern void i915_driver_irq_uninstall(struct drm_device * dev);
234extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 449extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
235 struct drm_file *file_priv); 450 struct drm_file *file_priv);
236extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, 451extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
237 struct drm_file *file_priv); 452 struct drm_file *file_priv);
453extern int i915_enable_vblank(struct drm_device *dev, int crtc);
454extern void i915_disable_vblank(struct drm_device *dev, int crtc);
455extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
238extern int i915_vblank_swap(struct drm_device *dev, void *data, 456extern int i915_vblank_swap(struct drm_device *dev, void *data,
239 struct drm_file *file_priv); 457 struct drm_file *file_priv);
458extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
240 459
241/* i915_mem.c */ 460/* i915_mem.c */
242extern int i915_mem_alloc(struct drm_device *dev, void *data, 461extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -250,11 +469,99 @@ extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
250extern void i915_mem_takedown(struct mem_block **heap); 469extern void i915_mem_takedown(struct mem_block **heap);
251extern void i915_mem_release(struct drm_device * dev, 470extern void i915_mem_release(struct drm_device * dev,
252 struct drm_file *file_priv, struct mem_block *heap); 471 struct drm_file *file_priv, struct mem_block *heap);
472/* i915_gem.c */
473int i915_gem_init_ioctl(struct drm_device *dev, void *data,
474 struct drm_file *file_priv);
475int i915_gem_create_ioctl(struct drm_device *dev, void *data,
476 struct drm_file *file_priv);
477int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
478 struct drm_file *file_priv);
479int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
480 struct drm_file *file_priv);
481int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
482 struct drm_file *file_priv);
483int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
484 struct drm_file *file_priv);
485int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
486 struct drm_file *file_priv);
487int i915_gem_execbuffer(struct drm_device *dev, void *data,
488 struct drm_file *file_priv);
489int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
490 struct drm_file *file_priv);
491int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
492 struct drm_file *file_priv);
493int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
494 struct drm_file *file_priv);
495int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
496 struct drm_file *file_priv);
497int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
498 struct drm_file *file_priv);
499int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
500 struct drm_file *file_priv);
501int i915_gem_set_tiling(struct drm_device *dev, void *data,
502 struct drm_file *file_priv);
503int i915_gem_get_tiling(struct drm_device *dev, void *data,
504 struct drm_file *file_priv);
505void i915_gem_load(struct drm_device *dev);
506int i915_gem_proc_init(struct drm_minor *minor);
507void i915_gem_proc_cleanup(struct drm_minor *minor);
508int i915_gem_init_object(struct drm_gem_object *obj);
509void i915_gem_free_object(struct drm_gem_object *obj);
510int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
511void i915_gem_object_unpin(struct drm_gem_object *obj);
512void i915_gem_lastclose(struct drm_device *dev);
513uint32_t i915_get_gem_seqno(struct drm_device *dev);
514void i915_gem_retire_requests(struct drm_device *dev);
515void i915_gem_retire_work_handler(struct work_struct *work);
516void i915_gem_clflush_object(struct drm_gem_object *obj);
517
518/* i915_gem_tiling.c */
519void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
520
521/* i915_gem_debug.c */
522void i915_gem_dump_object(struct drm_gem_object *obj, int len,
523 const char *where, uint32_t mark);
524#if WATCH_INACTIVE
525void i915_verify_inactive(struct drm_device *dev, char *file, int line);
526#else
527#define i915_verify_inactive(dev, file, line)
528#endif
529void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
532void i915_dump_lru(struct drm_device *dev, const char *where);
533
534/* i915_suspend.c */
535extern int i915_save_state(struct drm_device *dev);
536extern int i915_restore_state(struct drm_device *dev);
537
542/* i915_opregion.c */
543extern int intel_opregion_init(struct drm_device *dev);
544extern void intel_opregion_free(struct drm_device *dev);
545extern void opregion_asle_intr(struct drm_device *dev);
546extern void opregion_enable_asle(struct drm_device *dev);
547
548/**
549 * Lock test used when the lock is only needed to synchronize ring access.
550 *
551 * In that case, we don't need to take the lock once GEM has initialized, as
552 * nobody else has access to the ring.
553 */
554#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
555 if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
556 LOCK_TEST_WITH_RETURN(dev, file_priv); \
557} while (0)
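
As a usage sketch (editorial example, not part of this patch; the function name is hypothetical), a ring-touching ioctl would invoke the macro before emitting commands, so the full lock check only applies until GEM owns the ring:

/* Hypothetical sketch: guard a ring-touching ioctl with the macro above. */
static int i915_example_ring_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
	/* ... safe to touch the ring here ... */
	return 0;
}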
253 558
254#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) 559#define I915_READ(reg) readl(dev_priv->regs + (reg))
255#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) 560#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg))
256#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) 561#define I915_READ16(reg) readw(dev_priv->regs + (reg))
257#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) 562#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
563#define I915_READ8(reg) readb(dev_priv->regs + (reg))
564#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
258 565
259#define I915_VERBOSE 0 566#define I915_VERBOSE 0
260 567
@@ -284,816 +591,29 @@ extern void i915_mem_release(struct drm_device * dev,
284 if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \ 591 if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
285 dev_priv->ring.tail = outring; \ 592 dev_priv->ring.tail = outring; \
286 dev_priv->ring.space -= outcount * 4; \ 593 dev_priv->ring.space -= outcount * 4; \
287 I915_WRITE(LP_RING + RING_TAIL, outring); \ 594 I915_WRITE(PRB0_TAIL, outring); \
288} while(0) 595} while(0)
289 596
290extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
291
292/* Extended config space */
293#define LBB 0xf4
294
295/* VGA stuff */
296
297#define VGA_ST01_MDA 0x3ba
298#define VGA_ST01_CGA 0x3da
299
300#define VGA_MSR_WRITE 0x3c2
301#define VGA_MSR_READ 0x3cc
302#define VGA_MSR_MEM_EN (1<<1)
303#define VGA_MSR_CGA_MODE (1<<0)
304
305#define VGA_SR_INDEX 0x3c4
306#define VGA_SR_DATA 0x3c5
307
308#define VGA_AR_INDEX 0x3c0
309#define VGA_AR_VID_EN (1<<5)
310#define VGA_AR_DATA_WRITE 0x3c0
311#define VGA_AR_DATA_READ 0x3c1
312
313#define VGA_GR_INDEX 0x3ce
314#define VGA_GR_DATA 0x3cf
315/* GR05 */
316#define VGA_GR_MEM_READ_MODE_SHIFT 3
317#define VGA_GR_MEM_READ_MODE_PLANE 1
318/* GR06 */
319#define VGA_GR_MEM_MODE_MASK 0xc
320#define VGA_GR_MEM_MODE_SHIFT 2
321#define VGA_GR_MEM_A0000_AFFFF 0
322#define VGA_GR_MEM_A0000_BFFFF 1
323#define VGA_GR_MEM_B0000_B7FFF 2
324#define VGA_GR_MEM_B0000_BFFFF 3
325
326#define VGA_DACMASK 0x3c6
327#define VGA_DACRX 0x3c7
328#define VGA_DACWX 0x3c8
329#define VGA_DACDATA 0x3c9
330
331#define VGA_CR_INDEX_MDA 0x3b4
332#define VGA_CR_DATA_MDA 0x3b5
333#define VGA_CR_INDEX_CGA 0x3d4
334#define VGA_CR_DATA_CGA 0x3d5
335
336#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
337#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
338#define CMD_REPORT_HEAD (7<<23)
339#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
340#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
341
342#define INST_PARSER_CLIENT 0x00000000
343#define INST_OP_FLUSH 0x02000000
344#define INST_FLUSH_MAP_CACHE 0x00000001
345
346#define BB1_START_ADDR_MASK (~0x7)
347#define BB1_PROTECTED (1<<0)
348#define BB1_UNPROTECTED (0<<0)
349#define BB2_END_ADDR_MASK (~0x7)
350
351/* Framebuffer compression */
352#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
353#define FBC_LL_BASE 0x03204 /* 4k page aligned */
354#define FBC_CONTROL 0x03208
355#define FBC_CTL_EN (1<<31)
356#define FBC_CTL_PERIODIC (1<<30)
357#define FBC_CTL_INTERVAL_SHIFT (16)
358#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
359#define FBC_CTL_STRIDE_SHIFT (5)
360#define FBC_CTL_FENCENO (1<<0)
361#define FBC_COMMAND 0x0320c
362#define FBC_CMD_COMPRESS (1<<0)
363#define FBC_STATUS 0x03210
364#define FBC_STAT_COMPRESSING (1<<31)
365#define FBC_STAT_COMPRESSED (1<<30)
366#define FBC_STAT_MODIFIED (1<<29)
367#define FBC_STAT_CURRENT_LINE (1<<0)
368#define FBC_CONTROL2 0x03214
369#define FBC_CTL_FENCE_DBL (0<<4)
370#define FBC_CTL_IDLE_IMM (0<<2)
371#define FBC_CTL_IDLE_FULL (1<<2)
372#define FBC_CTL_IDLE_LINE (2<<2)
373#define FBC_CTL_IDLE_DEBUG (3<<2)
374#define FBC_CTL_CPU_FENCE (1<<1)
375#define FBC_CTL_PLANEA (0<<0)
376#define FBC_CTL_PLANEB (1<<0)
377#define FBC_FENCE_OFF 0x0321b
378
379#define FBC_LL_SIZE (1536)
380#define FBC_LL_PAD (32)
381
382/* Interrupt bits:
383 */
384#define USER_INT_FLAG (1<<1)
385#define VSYNC_PIPEB_FLAG (1<<5)
386#define VSYNC_PIPEA_FLAG (1<<7)
387#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
388
389#define I915REG_HWSTAM 0x02098
390#define I915REG_INT_IDENTITY_R 0x020a4
391#define I915REG_INT_MASK_R 0x020a8
392#define I915REG_INT_ENABLE_R 0x020a0
393
394#define I915REG_PIPEASTAT 0x70024
395#define I915REG_PIPEBSTAT 0x71024
396
397#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
398#define I915_VBLANK_CLEAR (1UL<<1)
399
400#define SRX_INDEX 0x3c4
401#define SRX_DATA 0x3c5
402#define SR01 1
403#define SR01_SCREEN_OFF (1<<5)
404
405#define PPCR 0x61204
406#define PPCR_ON (1<<0)
407
408#define DVOB 0x61140
409#define DVOB_ON (1<<31)
410#define DVOC 0x61160
411#define DVOC_ON (1<<31)
412#define LVDS 0x61180
413#define LVDS_ON (1<<31)
414
415#define ADPA 0x61100
416#define ADPA_DPMS_MASK (~(3<<10))
417#define ADPA_DPMS_ON (0<<10)
418#define ADPA_DPMS_SUSPEND (1<<10)
419#define ADPA_DPMS_STANDBY (2<<10)
420#define ADPA_DPMS_OFF (3<<10)
421
422#define NOPID 0x2094
423#define LP_RING 0x2030
424#define HP_RING 0x2040
425/* The binner has its own ring buffer:
426 */
427#define HWB_RING 0x2400
428
429#define RING_TAIL 0x00
430#define TAIL_ADDR 0x001FFFF8
431#define RING_HEAD 0x04
432#define HEAD_WRAP_COUNT 0xFFE00000
433#define HEAD_WRAP_ONE 0x00200000
434#define HEAD_ADDR 0x001FFFFC
435#define RING_START 0x08
436#define START_ADDR 0xFFFFF000
437#define RING_LEN 0x0C
438#define RING_NR_PAGES 0x001FF000
439#define RING_REPORT_MASK 0x00000006
440#define RING_REPORT_64K 0x00000002
441#define RING_REPORT_128K 0x00000004
442#define RING_NO_REPORT 0x00000000
443#define RING_VALID_MASK 0x00000001
444#define RING_VALID 0x00000001
445#define RING_INVALID 0x00000000
446
447/* Instruction parser error reg:
448 */
449#define IPEIR 0x2088
450
451/* Scratch pad debug 0 reg:
452 */
453#define SCPD0 0x209c
454
455/* Error status reg:
456 */
457#define ESR 0x20b8
458
459/* Secondary DMA fetch address debug reg:
460 */
461#define DMA_FADD_S 0x20d4
462
463/* Memory Interface Arbitration State
464 */
465#define MI_ARB_STATE 0x20e4
466
467/* Cache mode 0 reg.
468 * - Manipulating render cache behaviour is central
469 * to the concept of zone rendering, tuning this reg can help avoid
470 * unnecessary render cache reads and even writes (for z/stencil)
471 * at beginning and end of scene.
472 *
473 * - To change a bit, write to this reg with a mask bit set and the
474 * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
475 */
476#define Cache_Mode_0 0x2120
477#define CACHE_MODE_0 0x2120
478#define CM0_MASK_SHIFT 16
479#define CM0_IZ_OPT_DISABLE (1<<6)
480#define CM0_ZR_OPT_DISABLE (1<<5)
481#define CM0_DEPTH_EVICT_DISABLE (1<<4)
482#define CM0_COLOR_EVICT_DISABLE (1<<3)
483#define CM0_DEPTH_WRITE_DISABLE (1<<1)
484#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
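
To make the masked-write convention concrete, a minimal sketch (editorial example; assumes a dev_priv with mapped regs is in scope): setting and then clearing CM0_IZ_OPT_DISABLE without disturbing the other bits.

/* Set: the mask bit in the high word selects the bit being written. */
I915_WRITE(CACHE_MODE_0,
	   (CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT) | CM0_IZ_OPT_DISABLE);
/* Clear: same mask bit, value bit left at zero. */
I915_WRITE(CACHE_MODE_0, CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT);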
485
486
487/* Graphics flush control. A CPU write flushes the GWB of all writes.
488 * The data is discarded.
489 */
490#define GFX_FLSH_CNTL 0x2170
491
492/* Binner control. Defines the location of the bin pointer list:
493 */
494#define BINCTL 0x2420
495#define BC_MASK (1 << 9)
496
497/* Binned scene info.
498 */
499#define BINSCENE 0x2428
500#define BS_OP_LOAD (1 << 8)
501#define BS_MASK (1 << 22)
502
503/* Bin command parser debug reg:
504 */
505#define BCPD 0x2480
506
507/* Bin memory control debug reg:
508 */
509#define BMCD 0x2484
510
511/* Bin data cache debug reg:
512 */
513#define BDCD 0x2488
514
515/* Binner pointer cache debug reg:
516 */
517#define BPCD 0x248c
518
519/* Binner scratch pad debug reg:
520 */
521#define BINSKPD 0x24f0
522
523/* HWB scratch pad debug reg:
524 */
525#define HWBSKPD 0x24f4
526
527/* Binner memory pool reg:
528 */
529#define BMP_BUFFER 0x2430
530#define BMP_PAGE_SIZE_4K (0 << 10)
531#define BMP_BUFFER_SIZE_SHIFT 1
532#define BMP_ENABLE (1 << 0)
533
534/* Get/put memory from the binner memory pool:
535 */
536#define BMP_GET 0x2438
537#define BMP_PUT 0x2440
538#define BMP_OFFSET_SHIFT 5
539
540/* 3D state packets:
541 */
542#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
543
544#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
545#define SC_UPDATE_SCISSOR (0x1<<1)
546#define SC_ENABLE_MASK (0x1<<0)
547#define SC_ENABLE (0x1<<0)
548
549#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
550
551#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
552#define SCI_YMIN_MASK (0xffff<<16)
553#define SCI_XMIN_MASK (0xffff<<0)
554#define SCI_YMAX_MASK (0xffff<<16)
555#define SCI_XMAX_MASK (0xffff<<0)
556
557#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
558#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
559#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
560#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
561#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
562#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
563#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
564
565#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
566
567#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
568#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
569#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
570#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
571#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
572#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
573
574#define MI_BATCH_BUFFER ((0x30<<23)|1)
575#define MI_BATCH_BUFFER_START (0x31<<23)
576#define MI_BATCH_BUFFER_END (0xA<<23)
577#define MI_BATCH_NON_SECURE (1)
578#define MI_BATCH_NON_SECURE_I965 (1<<8)
579
580#define MI_WAIT_FOR_EVENT ((0x3<<23))
581#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
582#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
583#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
584
585#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
586
587#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
588#define ASYNC_FLIP (1<<22)
589#define DISPLAY_PLANE_A (0<<20)
590#define DISPLAY_PLANE_B (1<<20)
591
592/* Display regs */
593#define DSPACNTR 0x70180
594#define DSPBCNTR 0x71180
595#define DISPPLANE_SEL_PIPE_MASK (1<<24)
596
597/* Define the region of interest for the binner:
598 */
599#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
600
601#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
602
603#define CMD_MI_FLUSH (0x04 << 23)
604#define MI_NO_WRITE_FLUSH (1 << 2)
605#define MI_READ_FLUSH (1 << 0)
606#define MI_EXE_FLUSH (1 << 1)
607#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
608#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
609
610#define BREADCRUMB_BITS 31
611#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
612
613#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
614#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
615
616#define BLC_PWM_CTL 0x61254
617#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
618
619#define BLC_PWM_CTL2 0x61250
620/**
621 * This is the most significant 15 bits of the number of backlight cycles in a
622 * complete cycle of the modulated backlight control.
623 *
624 * The actual value is this field multiplied by two.
625 */
626#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
627#define BLM_LEGACY_MODE (1 << 16)
628/**
629 * This is the number of cycles out of the backlight modulation cycle for which
630 * the backlight is on.
631 *
632 * This field must be no greater than the number of cycles in the complete
633 * backlight modulation cycle.
634 */
635#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
636#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
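
A minimal decode sketch applying the field semantics above (editorial example; assumes dev_priv is in scope):

u32 blc = I915_READ(BLC_PWM_CTL);
/* The cycle count is the 15-bit field multiplied by two. */
u32 freq = ((blc & BACKLIGHT_MODULATION_FREQ_MASK) >>
	    BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
/* On-cycles; must not exceed the full modulation cycle. */
u32 duty = (blc & BACKLIGHT_DUTY_CYCLE_MASK) >> BACKLIGHT_DUTY_CYCLE_SHIFT;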
637
638#define I915_GCFGC 0xf0
639#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
640#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
641#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
642#define I915_DISPLAY_CLOCK_MASK (7 << 4)
643
644#define I855_HPLLCC 0xc0
645#define I855_CLOCK_CONTROL_MASK (3 << 0)
646#define I855_CLOCK_133_200 (0 << 0)
647#define I855_CLOCK_100_200 (1 << 0)
648#define I855_CLOCK_100_133 (2 << 0)
649#define I855_CLOCK_166_250 (3 << 0)
650
651/* p317, 319
652 */
653#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
654#define VCLK2_VCO_N 0x600a
655#define VCLK2_VCO_DIV_SEL 0x6012
656
657#define VCLK_DIVISOR_VGA0 0x6000
658#define VCLK_DIVISOR_VGA1 0x6004
659#define VCLK_POST_DIV 0x6010
660/** Selects a post divisor of 4 instead of 2. */
661# define VGA1_PD_P2_DIV_4 (1 << 15)
662/** Overrides the p2 post divisor field */
663# define VGA1_PD_P1_DIV_2 (1 << 13)
664# define VGA1_PD_P1_SHIFT 8
665/** P1 value is 2 greater than this field */
666# define VGA1_PD_P1_MASK (0x1f << 8)
667/** Selects a post divisor of 4 instead of 2. */
668# define VGA0_PD_P2_DIV_4 (1 << 7)
669/** Overrides the p2 post divisor field */
670# define VGA0_PD_P1_DIV_2 (1 << 5)
671# define VGA0_PD_P1_SHIFT 0
672/** P1 value is 2 greater than this field */
673# define VGA0_PD_P1_MASK (0x1f << 0)
674
675/* PCI D state control register */
676#define D_STATE 0x6104
677#define DSPCLK_GATE_D 0x6200
678
679/* I830 CRTC registers */
680#define HTOTAL_A 0x60000
681#define HBLANK_A 0x60004
682#define HSYNC_A 0x60008
683#define VTOTAL_A 0x6000c
684#define VBLANK_A 0x60010
685#define VSYNC_A 0x60014
686#define PIPEASRC 0x6001c
687#define BCLRPAT_A 0x60020
688#define VSYNCSHIFT_A 0x60028
689
690#define HTOTAL_B 0x61000
691#define HBLANK_B 0x61004
692#define HSYNC_B 0x61008
693#define VTOTAL_B 0x6100c
694#define VBLANK_B 0x61010
695#define VSYNC_B 0x61014
696#define PIPEBSRC 0x6101c
697#define BCLRPAT_B 0x61020
698#define VSYNCSHIFT_B 0x61028
699
700#define PP_STATUS 0x61200
701# define PP_ON (1 << 31)
702/**
703 * Indicates that all dependencies of the panel are on:
704 *
705 * - PLL enabled
706 * - pipe enabled
707 * - LVDS/DVOB/DVOC on
708 */
709# define PP_READY (1 << 30)
710# define PP_SEQUENCE_NONE (0 << 28)
711# define PP_SEQUENCE_ON (1 << 28)
712# define PP_SEQUENCE_OFF (2 << 28)
713# define PP_SEQUENCE_MASK 0x30000000
714#define PP_CONTROL 0x61204
715# define POWER_TARGET_ON (1 << 0)
716
717#define LVDSPP_ON 0x61208
718#define LVDSPP_OFF 0x6120c
719#define PP_CYCLE 0x61210
720
721#define PFIT_CONTROL 0x61230
722# define PFIT_ENABLE (1 << 31)
723# define PFIT_PIPE_MASK (3 << 29)
724# define PFIT_PIPE_SHIFT 29
725# define VERT_INTERP_DISABLE (0 << 10)
726# define VERT_INTERP_BILINEAR (1 << 10)
727# define VERT_INTERP_MASK (3 << 10)
728# define VERT_AUTO_SCALE (1 << 9)
729# define HORIZ_INTERP_DISABLE (0 << 6)
730# define HORIZ_INTERP_BILINEAR (1 << 6)
731# define HORIZ_INTERP_MASK (3 << 6)
732# define HORIZ_AUTO_SCALE (1 << 5)
733# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
734
735#define PFIT_PGM_RATIOS 0x61234
736# define PFIT_VERT_SCALE_MASK 0xfff00000
737# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
738
739#define PFIT_AUTO_RATIOS 0x61238
740
741
742#define DPLL_A 0x06014
743#define DPLL_B 0x06018
744# define DPLL_VCO_ENABLE (1 << 31)
745# define DPLL_DVO_HIGH_SPEED (1 << 30)
746# define DPLL_SYNCLOCK_ENABLE (1 << 29)
747# define DPLL_VGA_MODE_DIS (1 << 28)
748# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
749# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
750# define DPLL_MODE_MASK (3 << 26)
751# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
752# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
753# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
754# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
755# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
756# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
757/**
758 * The i830 generation, in DAC/serial mode, defines p1 as two plus this
759 * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
760 */
761# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
762/**
763 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
764 * this field (only one bit may be set).
765 */
766# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
767# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
768# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
769# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
770# define PLL_REF_INPUT_DREFCLK (0 << 13)
771# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
772# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
773# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
774# define PLL_REF_INPUT_MASK (3 << 13)
775# define PLL_LOAD_PULSE_PHASE_SHIFT 9
776/*
777 * Parallel to Serial Load Pulse phase selection.
778 * Selects the phase for the 10X DPLL clock for the PCIe
779 * digital display port. The range is 4 to 13; 10 or more
780 * is just a flip delay. The default is 6.
781 */
782# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
783# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
784
785/**
786 * SDVO multiplier for 945G/GM. Not used on 965.
787 *
788 * \sa DPLL_MD_UDI_MULTIPLIER_MASK
789 */
790# define SDVO_MULTIPLIER_MASK 0x000000ff
791# define SDVO_MULTIPLIER_SHIFT_HIRES 4
792# define SDVO_MULTIPLIER_SHIFT_VGA 0
793
794/** @defgroup DPLL_MD
795 * @{
796 */
797/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
798#define DPLL_A_MD 0x0601c
799/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
800#define DPLL_B_MD 0x06020
801/**
802 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
803 *
804 * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
805 */
806# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
807# define DPLL_MD_UDI_DIVIDER_SHIFT 24
808/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
809# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
810# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
811/**
812 * SDVO/UDI pixel multiplier.
813 *
814 * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
815 * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
816 * modes, the bus rate would be below the limits, so SDVO allows for stuffing
817 * dummy bytes in the datastream at an increased clock rate, with both sides of
818 * the link knowing how many of the bytes are fill.
819 *
820 * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
821 * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
822 * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
823 * through an SDVO command.
824 *
825 * This register field has values of multiplication factor minus 1, with
826 * a maximum multiplier of 5 for SDVO.
827 */
828# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
829# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
830/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
831 * This must be set to the default value (3) or the CRT won't work. No,
832 * I don't entirely understand what this does...
833 */
834# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
835# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
836/** @} */
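
Following the worked 65 MHz example above, a sketch of programming a 2x multiplier on pipe A (editorial example; assumes dev_priv is in scope; the field stores the multiplier minus one):

u32 dpll_md = I915_READ(DPLL_A_MD);
dpll_md &= ~DPLL_MD_UDI_MULTIPLIER_MASK;
dpll_md |= (2 - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;	/* 2x SDVO multiplier */
I915_WRITE(DPLL_A_MD, dpll_md);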
837
838#define DPLL_TEST 0x606c
839# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
840# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
841# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
842# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
843# define DPLLB_TEST_N_BYPASS (1 << 19)
844# define DPLLB_TEST_M_BYPASS (1 << 18)
845# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
846# define DPLLA_TEST_N_BYPASS (1 << 3)
847# define DPLLA_TEST_M_BYPASS (1 << 2)
848# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
849
850#define ADPA 0x61100
851#define ADPA_DAC_ENABLE (1<<31)
852#define ADPA_DAC_DISABLE 0
853#define ADPA_PIPE_SELECT_MASK (1<<30)
854#define ADPA_PIPE_A_SELECT 0
855#define ADPA_PIPE_B_SELECT (1<<30)
856#define ADPA_USE_VGA_HVPOLARITY (1<<15)
857#define ADPA_SETS_HVPOLARITY 0
858#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
859#define ADPA_VSYNC_CNTL_ENABLE 0
860#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
861#define ADPA_HSYNC_CNTL_ENABLE 0
862#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
863#define ADPA_VSYNC_ACTIVE_LOW 0
864#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
865#define ADPA_HSYNC_ACTIVE_LOW 0
866
867#define FPA0 0x06040
868#define FPA1 0x06044
869#define FPB0 0x06048
870#define FPB1 0x0604c
871# define FP_N_DIV_MASK 0x003f0000
872# define FP_N_DIV_SHIFT 16
873# define FP_M1_DIV_MASK 0x00003f00
874# define FP_M1_DIV_SHIFT 8
875# define FP_M2_DIV_MASK 0x0000003f
876# define FP_M2_DIV_SHIFT 0
877
878
879#define PORT_HOTPLUG_EN 0x61110
880# define SDVOB_HOTPLUG_INT_EN (1 << 26)
881# define SDVOC_HOTPLUG_INT_EN (1 << 25)
882# define TV_HOTPLUG_INT_EN (1 << 18)
883# define CRT_HOTPLUG_INT_EN (1 << 9)
884# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
885
886#define PORT_HOTPLUG_STAT 0x61114
887# define CRT_HOTPLUG_INT_STATUS (1 << 11)
888# define TV_HOTPLUG_INT_STATUS (1 << 10)
889# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
890# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
891# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
892# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
893# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
894# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
895
896#define SDVOB 0x61140
897#define SDVOC 0x61160
898#define SDVO_ENABLE (1 << 31)
899#define SDVO_PIPE_B_SELECT (1 << 30)
900#define SDVO_STALL_SELECT (1 << 29)
901#define SDVO_INTERRUPT_ENABLE (1 << 26)
902/** 597/**
903 * 915G/GM SDVO pixel multiplier. 598 * Reads a dword out of the status page, which is written to from the command
599 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
600 * MI_STORE_DATA_IMM.
904 * 601 *
905 * Programmed value is multiplier - 1, up to 5x. 602 * The following dwords have a reserved meaning:
906 * 603 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
907 * \sa DPLL_MD_UDI_MULTIPLIER_MASK 604 * 0x04: ring 0 head pointer
908 */ 605 * 0x05: ring 1 head pointer (915-class)
909#define SDVO_PORT_MULTIPLY_MASK (7 << 23) 606 * 0x06: ring 2 head pointer (915-class)
910#define SDVO_PORT_MULTIPLY_SHIFT 23 607 * 0x10-0x1b: Context status DWords (GM45)
911#define SDVO_PHASE_SELECT_MASK (15 << 19) 608 * 0x1f: Last written status offset. (GM45)
912#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
913#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
914#define SDVOC_GANG_MODE (1 << 16)
915#define SDVO_BORDER_ENABLE (1 << 7)
916#define SDVOB_PCIE_CONCURRENCY (1 << 3)
917#define SDVO_DETECTED (1 << 2)
918/* Bits to be preserved when writing */
919#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
920#define SDVOC_PRESERVE_MASK (1 << 17)
921
922/** @defgroup LVDS
923 * @{
924 */
925/**
926 * This register controls the LVDS output enable, pipe selection, and data
927 * format selection.
928 * 609 *
929 * All of the clock/data pairs are force powered down by power sequencing. 610 * The area from dword 0x20 to 0x3ff is available for driver usage.
930 */
931#define LVDS 0x61180
932/**
933 * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
934 * the DPLL semantics change when the LVDS is assigned to that pipe.
935 */
936# define LVDS_PORT_EN (1 << 31)
937/** Selects pipe B for LVDS data. Must be set on pre-965. */
938# define LVDS_PIPEB_SELECT (1 << 30)
939
940/**
941 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
942 * pixel.
943 */
944# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
945# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
946# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
947/**
948 * Controls the A3 data pair, which contains the additional LSBs for 24 bit
949 * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
950 * on.
951 */
952# define LVDS_A3_POWER_MASK (3 << 6)
953# define LVDS_A3_POWER_DOWN (0 << 6)
954# define LVDS_A3_POWER_UP (3 << 6)
955/**
956 * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
957 * is set.
958 */
959# define LVDS_CLKB_POWER_MASK (3 << 4)
960# define LVDS_CLKB_POWER_DOWN (0 << 4)
961# define LVDS_CLKB_POWER_UP (3 << 4)
962
963/**
964 * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
965 * setting for whether we are in dual-channel mode. The B3 pair will
966 * additionally only be powered up when LVDS_A3_POWER_UP is set.
967 */
968# define LVDS_B0B3_POWER_MASK (3 << 2)
969# define LVDS_B0B3_POWER_DOWN (0 << 2)
970# define LVDS_B0B3_POWER_UP (3 << 2)
971
972#define PIPEACONF 0x70008
973#define PIPEACONF_ENABLE (1<<31)
974#define PIPEACONF_DISABLE 0
975#define PIPEACONF_DOUBLE_WIDE (1<<30)
976#define I965_PIPECONF_ACTIVE (1<<30)
977#define PIPEACONF_SINGLE_WIDE 0
978#define PIPEACONF_PIPE_UNLOCKED 0
979#define PIPEACONF_PIPE_LOCKED (1<<25)
980#define PIPEACONF_PALETTE 0
981#define PIPEACONF_GAMMA (1<<24)
982#define PIPECONF_FORCE_BORDER (1<<25)
983#define PIPECONF_PROGRESSIVE (0 << 21)
984#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
985#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
986
987#define DSPARB 0x70030
988#define DSPARB_CSTART_MASK (0x7f << 7)
989#define DSPARB_CSTART_SHIFT 7
990#define DSPARB_BSTART_MASK (0x7f)
991#define DSPARB_BSTART_SHIFT 0
992
993#define PIPEBCONF 0x71008
994#define PIPEBCONF_ENABLE (1<<31)
995#define PIPEBCONF_DISABLE 0
996#define PIPEBCONF_DOUBLE_WIDE (1<<30)
997#define PIPEBCONF_DISABLE 0
998#define PIPEBCONF_GAMMA (1<<24)
999#define PIPEBCONF_PALETTE 0
1000
1001#define PIPEBGCMAXRED 0x71010
1002#define PIPEBGCMAXGREEN 0x71014
1003#define PIPEBGCMAXBLUE 0x71018
1004#define PIPEBSTAT 0x71024
1005#define PIPEBFRAMEHIGH 0x71040
1006#define PIPEBFRAMEPIXEL 0x71044
1007
1008#define DSPACNTR 0x70180
1009#define DSPBCNTR 0x71180
1010#define DISPLAY_PLANE_ENABLE (1<<31)
1011#define DISPLAY_PLANE_DISABLE 0
1012#define DISPPLANE_GAMMA_ENABLE (1<<30)
1013#define DISPPLANE_GAMMA_DISABLE 0
1014#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
1015#define DISPPLANE_8BPP (0x2<<26)
1016#define DISPPLANE_15_16BPP (0x4<<26)
1017#define DISPPLANE_16BPP (0x5<<26)
1018#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
1019#define DISPPLANE_32BPP (0x7<<26)
1020#define DISPPLANE_STEREO_ENABLE (1<<25)
1021#define DISPPLANE_STEREO_DISABLE 0
1022#define DISPPLANE_SEL_PIPE_MASK (1<<24)
1023#define DISPPLANE_SEL_PIPE_A 0
1024#define DISPPLANE_SEL_PIPE_B (1<<24)
1025#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
1026#define DISPPLANE_SRC_KEY_DISABLE 0
1027#define DISPPLANE_LINE_DOUBLE (1<<20)
1028#define DISPPLANE_NO_LINE_DOUBLE 0
1029#define DISPPLANE_STEREO_POLARITY_FIRST 0
1030#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
1031/* plane B only */
1032#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
1033#define DISPPLANE_ALPHA_TRANS_DISABLE 0
1034#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
1035#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
1036
1037#define DSPABASE 0x70184
1038#define DSPASTRIDE 0x70188
1039
1040#define DSPBBASE 0x71184
1041#define DSPBADDR DSPBBASE
1042#define DSPBSTRIDE 0x71188
1043
1044#define DSPAKEYVAL 0x70194
1045#define DSPAKEYMASK 0x70198
1046
1047#define DSPAPOS 0x7018C /* reserved */
1048#define DSPASIZE 0x70190
1049#define DSPBPOS 0x7118C
1050#define DSPBSIZE 0x71190
1051
1052#define DSPASURF 0x7019C
1053#define DSPATILEOFF 0x701A4
1054
1055#define DSPBSURF 0x7119C
1056#define DSPBTILEOFF 0x711A4
1057
1058#define VGACNTRL 0x71400
1059# define VGA_DISP_DISABLE (1 << 31)
1060# define VGA_2X_MODE (1 << 30)
1061# define VGA_PIPE_B_SELECT (1 << 29)
1062
1063/*
1064 * Some BIOS scratch area registers. The 845 (and 830?) store the amount
1065 * of video memory available to the BIOS in SWF1.
1066 */
1067
1068#define SWF0 0x71410
1069
1070/*
1071 * 855 scratch registers.
1072 */
1073#define SWF10 0x70410
1074
1075#define SWF30 0x72414
1076
1077/*
1078 * Overlay registers. These are overlay registers accessed via MMIO.
1079 * Those loaded via the overlay register page are defined in i830_video.c.
1080 */ 611 */
1081#define OVADD 0x30000 612#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
1082 613#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
1083#define DOVSTA 0x30008 614#define I915_GEM_HWS_INDEX 0x20
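
For example (editorial sketch; dev_priv assumed in scope), GEM keeps its current sequence number in the driver-usage area reserved above:

/* Last seqno the GPU wrote back, plus the legacy breadcrumb at dword 5. */
uint32_t seqno = READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
uint32_t breadcrumb = READ_BREADCRUMB(dev_priv);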
1084#define OC_BUF (0x3<<20)
1085 615
1086#define OGAMC5 0x30010 616extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1087#define OGAMC4 0x30014
1088#define OGAMC3 0x30018
1089#define OGAMC2 0x3001c
1090#define OGAMC1 0x30020
1091#define OGAMC0 0x30024
1092/*
1093 * Palette registers
1094 */
1095#define PALETTE_A 0x0a000
1096#define PALETTE_B 0x0a800
1097 617
1098#define IS_I830(dev) ((dev)->pci_device == 0x3577) 618#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1099#define IS_845G(dev) ((dev)->pci_device == 0x2562) 619#define IS_845G(dev) ((dev)->pci_device == 0x2562)
@@ -1119,7 +639,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1119 639
1120#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) 640#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
1121 641
1122#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42) 642#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1123 643
1124#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ 644#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
1125 (dev)->pci_device == 0x2E12 || \ 645 (dev)->pci_device == 0x2E12 || \
@@ -1133,9 +653,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1133 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 653 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
1134 654
1135#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 655#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
1136 IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev)) 656 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
1137 657
1138#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev) || IS_G4X(dev)) 658#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
1139 659
1140#define PRIMARY_RINGBUFFER_SIZE (128*1024) 660#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1141 661
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
new file mode 100644
index 000000000000..9ac73dd1b422
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -0,0 +1,2558 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32#include <linux/swap.h>
33
34static int
35i915_gem_object_set_domain(struct drm_gem_object *obj,
36 uint32_t read_domains,
37 uint32_t write_domain);
38static int
39i915_gem_object_set_domain_range(struct drm_gem_object *obj,
40 uint64_t offset,
41 uint64_t size,
42 uint32_t read_domains,
43 uint32_t write_domain);
44static int
45i915_gem_set_domain(struct drm_gem_object *obj,
46 struct drm_file *file_priv,
47 uint32_t read_domains,
48 uint32_t write_domain);
49static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
50static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
51static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
52
53static void
54i915_gem_cleanup_ringbuffer(struct drm_device *dev);
55
56int
57i915_gem_init_ioctl(struct drm_device *dev, void *data,
58 struct drm_file *file_priv)
59{
60 drm_i915_private_t *dev_priv = dev->dev_private;
61 struct drm_i915_gem_init *args = data;
62
63 mutex_lock(&dev->struct_mutex);
64
65 if (args->gtt_start >= args->gtt_end ||
66 (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
67 (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
68 mutex_unlock(&dev->struct_mutex);
69 return -EINVAL;
70 }
71
72 drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
73 args->gtt_end - args->gtt_start);
74
75 dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
76
77 mutex_unlock(&dev->struct_mutex);
78
79 return 0;
80}
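
From userspace, the call might look like this sketch (hypothetical fd and range values; the request code is defined in i915_drm.h in this series):

/* Hand a page-aligned aperture range to GEM for its GTT allocator. */
struct drm_i915_gem_init init = {
	.gtt_start = 16 * 1024 * 1024,
	.gtt_end   = 64 * 1024 * 1024,
};
ioctl(fd, DRM_IOCTL_I915_GEM_INIT, &init);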
81
82
83/**
84 * Creates a new mm object and returns a handle to it.
85 */
86int
87i915_gem_create_ioctl(struct drm_device *dev, void *data,
88 struct drm_file *file_priv)
89{
90 struct drm_i915_gem_create *args = data;
91 struct drm_gem_object *obj;
92 int handle, ret;
93
94 args->size = roundup(args->size, PAGE_SIZE);
95
96 /* Allocate the new object */
97 obj = drm_gem_object_alloc(dev, args->size);
98 if (obj == NULL)
99 return -ENOMEM;
100
101 ret = drm_gem_handle_create(file_priv, obj, &handle);
102 mutex_lock(&dev->struct_mutex);
103 drm_gem_object_handle_unreference(obj);
104 mutex_unlock(&dev->struct_mutex);
105
106 if (ret)
107 return ret;
108
109 args->handle = handle;
110
111 return 0;
112}
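
A hypothetical userspace sketch (fd is an open DRM node; the request code comes from i915_drm.h in this series):

struct drm_i915_gem_create create = { .size = 4096 };
if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0) {
	/* create.handle now names a page-sized GEM object. */
}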
113
114/**
115 * Reads data from the object referenced by handle.
116 *
117 * On error, the contents of *data are undefined.
118 */
119int
120i915_gem_pread_ioctl(struct drm_device *dev, void *data,
121 struct drm_file *file_priv)
122{
123 struct drm_i915_gem_pread *args = data;
124 struct drm_gem_object *obj;
125 struct drm_i915_gem_object *obj_priv;
126 ssize_t read;
127 loff_t offset;
128 int ret;
129
130 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
131 if (obj == NULL)
132 return -EBADF;
133 obj_priv = obj->driver_private;
134
135 /* Bounds check source.
136 *
137 * XXX: This could use review for overflow issues...
138 */
139 if (args->offset > obj->size || args->size > obj->size ||
140 args->offset + args->size > obj->size) {
141 drm_gem_object_unreference(obj);
142 return -EINVAL;
143 }
144
145 mutex_lock(&dev->struct_mutex);
146
147 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
148 I915_GEM_DOMAIN_CPU, 0);
149 if (ret != 0) {
150 drm_gem_object_unreference(obj);
151 mutex_unlock(&dev->struct_mutex);
152 return ret;
153 }
154
155 offset = args->offset;
156
157 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
158 args->size, &offset);
159 if (read != args->size) {
160 drm_gem_object_unreference(obj);
161 mutex_unlock(&dev->struct_mutex);
162 if (read < 0)
163 return read;
164 else
165 return -EINVAL;
166 }
167
168 drm_gem_object_unreference(obj);
169 mutex_unlock(&dev->struct_mutex);
170
171 return 0;
172}
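
A hypothetical userspace counterpart (fd, handle, and the buffer are assumptions of this sketch):

char buf[4096];
struct drm_i915_gem_pread pread = {
	.handle   = handle,
	.offset   = 0,
	.size     = sizeof(buf),
	.data_ptr = (uint64_t)(uintptr_t)buf,
};
ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);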
173
174static int
175i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
176 struct drm_i915_gem_pwrite *args,
177 struct drm_file *file_priv)
178{
179 struct drm_i915_gem_object *obj_priv = obj->driver_private;
180 ssize_t remain;
181 loff_t offset;
182 char __user *user_data;
183 char __iomem *vaddr;
184 char *vaddr_atomic;
185 int i, o, l;
186 int ret = 0;
187 unsigned long pfn;
188 unsigned long unwritten;
189
190 user_data = (char __user *) (uintptr_t) args->data_ptr;
191 remain = args->size;
192 if (!access_ok(VERIFY_READ, user_data, remain))
193 return -EFAULT;
194
195
196 mutex_lock(&dev->struct_mutex);
197 ret = i915_gem_object_pin(obj, 0);
198 if (ret) {
199 mutex_unlock(&dev->struct_mutex);
200 return ret;
201 }
202 ret = i915_gem_set_domain(obj, file_priv,
203 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
204 if (ret)
205 goto fail;
206
207 obj_priv = obj->driver_private;
208 offset = obj_priv->gtt_offset + args->offset;
209 obj_priv->dirty = 1;
210
211 while (remain > 0) {
212 /* Operation in this page
213 *
214 * i = page number
215 * o = offset within page
216 * l = bytes to copy
217 */
218 i = offset >> PAGE_SHIFT;
219 o = offset & (PAGE_SIZE-1);
220 l = remain;
221 if ((o + l) > PAGE_SIZE)
222 l = PAGE_SIZE - o;
223
224 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
225
226#ifdef CONFIG_HIGHMEM
227 /* This is a workaround for the low performance of iounmap
228 * (approximately 10% CPU cost on normal 3D workloads).
229 * kmap_atomic on HIGHMEM kernels happens to let us map card
230 * memory without taking IPIs. When the vmap rework lands
231 * we should be able to dump this hack.
232 */
233 vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
234#if WATCH_PWRITE
235 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
236 i, o, l, pfn, vaddr_atomic);
237#endif
238 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
239 user_data, l);
240 kunmap_atomic(vaddr_atomic, KM_USER0);
241
242 if (unwritten)
243#endif /* CONFIG_HIGHMEM */
244 {
245 vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
246#if WATCH_PWRITE
247 DRM_INFO("pwrite slow i %d o %d l %d "
248 "pfn %ld vaddr %p\n",
249 i, o, l, pfn, vaddr);
250#endif
251 if (vaddr == NULL) {
252 ret = -EFAULT;
253 goto fail;
254 }
255 unwritten = __copy_from_user(vaddr + o, user_data, l);
256#if WATCH_PWRITE
257 DRM_INFO("unwritten %ld\n", unwritten);
258#endif
259 iounmap(vaddr);
260 if (unwritten) {
261 ret = -EFAULT;
262 goto fail;
263 }
264 }
265
266 remain -= l;
267 user_data += l;
268 offset += l;
269 }
270#if WATCH_PWRITE && 1
271 i915_gem_clflush_object(obj);
272 i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
273 i915_gem_clflush_object(obj);
274#endif
275
276fail:
277 i915_gem_object_unpin(obj);
278 mutex_unlock(&dev->struct_mutex);
279
280 return ret;
281}
282
283static int
284i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
285 struct drm_i915_gem_pwrite *args,
286 struct drm_file *file_priv)
287{
288 int ret;
289 loff_t offset;
290 ssize_t written;
291
292 mutex_lock(&dev->struct_mutex);
293
294 ret = i915_gem_set_domain(obj, file_priv,
295 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
296 if (ret) {
297 mutex_unlock(&dev->struct_mutex);
298 return ret;
299 }
300
301 offset = args->offset;
302
303 written = vfs_write(obj->filp,
304 (char __user *)(uintptr_t) args->data_ptr,
305 args->size, &offset);
306 if (written != args->size) {
307 mutex_unlock(&dev->struct_mutex);
308 if (written < 0)
309 return written;
310 else
311 return -EINVAL;
312 }
313
314 mutex_unlock(&dev->struct_mutex);
315
316 return 0;
317}
318
319/**
320 * Writes data to the object referenced by handle.
321 *
322 * On error, the contents of the buffer that were to be modified are undefined.
323 */
324int
325i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
326 struct drm_file *file_priv)
327{
328 struct drm_i915_gem_pwrite *args = data;
329 struct drm_gem_object *obj;
330 struct drm_i915_gem_object *obj_priv;
331 int ret = 0;
332
333 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
334 if (obj == NULL)
335 return -EBADF;
336 obj_priv = obj->driver_private;
337
338 /* Bounds check destination.
339 *
340 * XXX: This could use review for overflow issues...
341 */
342 if (args->offset > obj->size || args->size > obj->size ||
343 args->offset + args->size > obj->size) {
344 drm_gem_object_unreference(obj);
345 return -EINVAL;
346 }
347
348 /* We can only do the GTT pwrite on untiled buffers, as otherwise
349 * it would end up going through the fenced access, and we'll get
350 * different detiling behavior between reading and writing.
351 * pread/pwrite currently are reading and writing from the CPU
352 * perspective, requiring manual detiling by the client.
353 */
354 if (obj_priv->tiling_mode == I915_TILING_NONE &&
355 dev->gtt_total != 0)
356 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
357 else
358 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
359
360#if WATCH_PWRITE
361 if (ret)
362 DRM_INFO("pwrite failed %d\n", ret);
363#endif
364
365 drm_gem_object_unreference(obj);
366
367 return ret;
368}
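
The userspace side, for comparison (editorial sketch; fd, handle, and the buffer are assumptions):

char buf[4096] = { 0 };	/* data to upload */
struct drm_i915_gem_pwrite pwrite = {
	.handle   = handle,
	.offset   = 0,
	.size     = sizeof(buf),
	.data_ptr = (uint64_t)(uintptr_t)buf,
};
ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);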
369
370/**
371 * Called when user space prepares to use an object
372 */
373int
374i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
375 struct drm_file *file_priv)
376{
377 struct drm_i915_gem_set_domain *args = data;
378 struct drm_gem_object *obj;
379 int ret;
380
381 if (!(dev->driver->driver_features & DRIVER_GEM))
382 return -ENODEV;
383
384 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
385 if (obj == NULL)
386 return -EBADF;
387
388 mutex_lock(&dev->struct_mutex);
389#if WATCH_BUF
390 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
391 obj, obj->size, args->read_domains, args->write_domain);
392#endif
393 ret = i915_gem_set_domain(obj, file_priv,
394 args->read_domains, args->write_domain);
395 drm_gem_object_unreference(obj);
396 mutex_unlock(&dev->struct_mutex);
397 return ret;
398}
399
400/**
401 * Called when user space has done writes to this buffer
402 */
403int
404i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
405 struct drm_file *file_priv)
406{
407 struct drm_i915_gem_sw_finish *args = data;
408 struct drm_gem_object *obj;
409 struct drm_i915_gem_object *obj_priv;
410 int ret = 0;
411
412 if (!(dev->driver->driver_features & DRIVER_GEM))
413 return -ENODEV;
414
415 mutex_lock(&dev->struct_mutex);
416 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
417 if (obj == NULL) {
418 mutex_unlock(&dev->struct_mutex);
419 return -EBADF;
420 }
421
422#if WATCH_BUF
423 DRM_INFO("%s: sw_finish %d (%p %d)\n",
424 __func__, args->handle, obj, obj->size);
425#endif
426 obj_priv = obj->driver_private;
427
428 /* Pinned buffers may be scanout, so flush the cache */
429 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
430 i915_gem_clflush_object(obj);
431 drm_agp_chipset_flush(dev);
432 }
433 drm_gem_object_unreference(obj);
434 mutex_unlock(&dev->struct_mutex);
435 return ret;
436}
437
438/**
439 * Maps the contents of an object, returning the address it is mapped
440 * into.
441 *
442 * While the mapping holds a reference on the contents of the object, it doesn't
443 * imply a ref on the object itself.
444 */
445int
446i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
447 struct drm_file *file_priv)
448{
449 struct drm_i915_gem_mmap *args = data;
450 struct drm_gem_object *obj;
451 loff_t offset;
452 unsigned long addr;
453
454 if (!(dev->driver->driver_features & DRIVER_GEM))
455 return -ENODEV;
456
457 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
458 if (obj == NULL)
459 return -EBADF;
460
461 offset = args->offset;
462
463 down_write(&current->mm->mmap_sem);
464 addr = do_mmap(obj->filp, 0, args->size,
465 PROT_READ | PROT_WRITE, MAP_SHARED,
466 args->offset);
467 up_write(&current->mm->mmap_sem);
468 mutex_lock(&dev->struct_mutex);
469 drm_gem_object_unreference(obj);
470 mutex_unlock(&dev->struct_mutex);
471 if (IS_ERR((void *)addr))
472 return addr;
473
474 args->addr_ptr = (uint64_t) addr;
475
476 return 0;
477}
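
A hypothetical userspace sketch (fd and handle assumed):

struct drm_i915_gem_mmap mmap_arg = {
	.handle = handle,
	.offset = 0,
	.size   = 4096,
};
if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0) {
	char *ptr = (char *)(uintptr_t)mmap_arg.addr_ptr;
	ptr[0] = 0;	/* CPU access; bracket real use with set_domain/sw_finish */
}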
478
479static void
480i915_gem_object_free_page_list(struct drm_gem_object *obj)
481{
482 struct drm_i915_gem_object *obj_priv = obj->driver_private;
483 int page_count = obj->size / PAGE_SIZE;
484 int i;
485
486 if (obj_priv->page_list == NULL)
487 return;
488
489
490 for (i = 0; i < page_count; i++)
491 if (obj_priv->page_list[i] != NULL) {
492 if (obj_priv->dirty)
493 set_page_dirty(obj_priv->page_list[i]);
494 mark_page_accessed(obj_priv->page_list[i]);
495 page_cache_release(obj_priv->page_list[i]);
496 }
497 obj_priv->dirty = 0;
498
499 drm_free(obj_priv->page_list,
500 page_count * sizeof(struct page *),
501 DRM_MEM_DRIVER);
502 obj_priv->page_list = NULL;
503}
504
505static void
506i915_gem_object_move_to_active(struct drm_gem_object *obj)
507{
508 struct drm_device *dev = obj->dev;
509 drm_i915_private_t *dev_priv = dev->dev_private;
510 struct drm_i915_gem_object *obj_priv = obj->driver_private;
511
512 /* Add a reference if we're newly entering the active list. */
513 if (!obj_priv->active) {
514 drm_gem_object_reference(obj);
515 obj_priv->active = 1;
516 }
517 /* Move from whatever list we were on to the tail of execution. */
518 list_move_tail(&obj_priv->list,
519 &dev_priv->mm.active_list);
520}
521
522
523static void
524i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
525{
526 struct drm_device *dev = obj->dev;
527 drm_i915_private_t *dev_priv = dev->dev_private;
528 struct drm_i915_gem_object *obj_priv = obj->driver_private;
529
530 i915_verify_inactive(dev, __FILE__, __LINE__);
531 if (obj_priv->pin_count != 0)
532 list_del_init(&obj_priv->list);
533 else
534 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
535
536 if (obj_priv->active) {
537 obj_priv->active = 0;
538 drm_gem_object_unreference(obj);
539 }
540 i915_verify_inactive(dev, __FILE__, __LINE__);
541}
542
543/**
544 * Creates a new sequence number, emitting a write of it to the status page
545 * plus an interrupt, which will trigger i915_user_interrupt_handler.
546 *
547 * Must be called with dev->struct_mutex held.
548 *
549 * Returned sequence numbers are nonzero on success.
550 */
551static uint32_t
552i915_add_request(struct drm_device *dev, uint32_t flush_domains)
553{
554 drm_i915_private_t *dev_priv = dev->dev_private;
555 struct drm_i915_gem_request *request;
556 uint32_t seqno;
557 int was_empty;
558 RING_LOCALS;
559
560 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
561 if (request == NULL)
562 return 0;
563
564 /* Grab the seqno we're going to make this request be, and bump the
565 * next (skipping 0 so it can be the reserved no-seqno value).
566 */
567 seqno = dev_priv->mm.next_gem_seqno;
568 dev_priv->mm.next_gem_seqno++;
569 if (dev_priv->mm.next_gem_seqno == 0)
570 dev_priv->mm.next_gem_seqno++;
571
572 BEGIN_LP_RING(4);
573 OUT_RING(MI_STORE_DWORD_INDEX);
574 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
575 OUT_RING(seqno);
576
577 OUT_RING(MI_USER_INTERRUPT);
578 ADVANCE_LP_RING();
579
580 DRM_DEBUG("%d\n", seqno);
581
582 request->seqno = seqno;
583 request->emitted_jiffies = jiffies;
584 request->flush_domains = flush_domains;
585 was_empty = list_empty(&dev_priv->mm.request_list);
586 list_add_tail(&request->list, &dev_priv->mm.request_list);
587
588 if (was_empty && !dev_priv->mm.suspended)
589 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
590 return seqno;
591}
592
593/**
594 * Command execution barrier
595 *
596 * Ensures that all commands in the ring are finished
597 * before signalling the CPU.
598 */
599static uint32_t
600i915_retire_commands(struct drm_device *dev)
601{
602 drm_i915_private_t *dev_priv = dev->dev_private;
603 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
604 uint32_t flush_domains = 0;
605 RING_LOCALS;
606
607 /* The sampler always gets flushed on i965 (sigh) */
608 if (IS_I965G(dev))
609 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
610 BEGIN_LP_RING(2);
611 OUT_RING(cmd);
612 OUT_RING(0); /* noop */
613 ADVANCE_LP_RING();
614 return flush_domains;
615}
616
617/**
618 * Moves buffers associated only with the given active seqno from the active
619 * to inactive list, potentially freeing them.
620 */
621static void
622i915_gem_retire_request(struct drm_device *dev,
623 struct drm_i915_gem_request *request)
624{
625 drm_i915_private_t *dev_priv = dev->dev_private;
626
627 /* Move any buffers on the active list that are no longer referenced
628 * by the ringbuffer to the flushing/inactive lists as appropriate.
629 */
630 while (!list_empty(&dev_priv->mm.active_list)) {
631 struct drm_gem_object *obj;
632 struct drm_i915_gem_object *obj_priv;
633
634 obj_priv = list_first_entry(&dev_priv->mm.active_list,
635 struct drm_i915_gem_object,
636 list);
637 obj = obj_priv->obj;
638
639 /* If the seqno being retired doesn't match the oldest in the
640 * list, then the oldest in the list must still be newer than
641 * this seqno.
642 */
643 if (obj_priv->last_rendering_seqno != request->seqno)
644 return;
645#if WATCH_LRU
646 DRM_INFO("%s: retire %d moves to inactive list %p\n",
647 __func__, request->seqno, obj);
648#endif
649
650 if (obj->write_domain != 0) {
651 list_move_tail(&obj_priv->list,
652 &dev_priv->mm.flushing_list);
653 } else {
654 i915_gem_object_move_to_inactive(obj);
655 }
656 }
657
658 if (request->flush_domains != 0) {
659 struct drm_i915_gem_object *obj_priv, *next;
660
661 /* Clear the write domain and activity from any buffers
662 * that are just waiting for a flush matching the one retired.
663 */
664 list_for_each_entry_safe(obj_priv, next,
665 &dev_priv->mm.flushing_list, list) {
666 struct drm_gem_object *obj = obj_priv->obj;
667
668 if (obj->write_domain & request->flush_domains) {
669 obj->write_domain = 0;
670 i915_gem_object_move_to_inactive(obj);
671 }
672 }
673
674 }
675}
676
677/**
678 * Returns true if seq1 is later than seq2.
679 */
680static int
681i915_seqno_passed(uint32_t seq1, uint32_t seq2)
682{
683 return (int32_t)(seq1 - seq2) >= 0;
684}
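/* Editor's note: a minimal worked example of why the signed subtraction
 * above survives 32-bit wraparound. Values are illustrative:
 *
 *   i915_seqno_passed(5, 0xfffffffe)
 *     -> (int32_t)(5 - 0xfffffffe) == (int32_t)7 >= 0 -> true
 *
 * A seqno issued just after the counter wraps still compares as later
 * than one issued just before it, provided the two are within 2^31 of
 * each other.
 */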
685
686uint32_t
687i915_get_gem_seqno(struct drm_device *dev)
688{
689 drm_i915_private_t *dev_priv = dev->dev_private;
690
691 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
692}
693
694/**
695 * This function clears the request list as sequence numbers are passed.
696 */
697void
698i915_gem_retire_requests(struct drm_device *dev)
699{
700 drm_i915_private_t *dev_priv = dev->dev_private;
701 uint32_t seqno;
702
703 seqno = i915_get_gem_seqno(dev);
704
705 while (!list_empty(&dev_priv->mm.request_list)) {
706 struct drm_i915_gem_request *request;
707 uint32_t retiring_seqno;
708
709 request = list_first_entry(&dev_priv->mm.request_list,
710 struct drm_i915_gem_request,
711 list);
712 retiring_seqno = request->seqno;
713
714 if (i915_seqno_passed(seqno, retiring_seqno) ||
715 dev_priv->mm.wedged) {
716 i915_gem_retire_request(dev, request);
717
718 list_del(&request->list);
719 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
720 } else
721 break;
722 }
723}
724
725void
726i915_gem_retire_work_handler(struct work_struct *work)
727{
728 drm_i915_private_t *dev_priv;
729 struct drm_device *dev;
730
731 dev_priv = container_of(work, drm_i915_private_t,
732 mm.retire_work.work);
733 dev = dev_priv->dev;
734
735 mutex_lock(&dev->struct_mutex);
736 i915_gem_retire_requests(dev);
737 if (!dev_priv->mm.suspended &&
738 !list_empty(&dev_priv->mm.request_list))
739 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
740 mutex_unlock(&dev->struct_mutex);
741}
742
743/**
744 * Waits for a sequence number to be signaled, and cleans up the
745 * request and object lists appropriately for that event.
746 */
747static int
748i915_wait_request(struct drm_device *dev, uint32_t seqno)
749{
750 drm_i915_private_t *dev_priv = dev->dev_private;
751 int ret = 0;
752
753 BUG_ON(seqno == 0);
754
755 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
756 dev_priv->mm.waiting_gem_seqno = seqno;
757 i915_user_irq_get(dev);
758 ret = wait_event_interruptible(dev_priv->irq_queue,
759 i915_seqno_passed(i915_get_gem_seqno(dev),
760 seqno) ||
761 dev_priv->mm.wedged);
762 i915_user_irq_put(dev);
763 dev_priv->mm.waiting_gem_seqno = 0;
764 }
765 if (dev_priv->mm.wedged)
766 ret = -EIO;
767
768 if (ret && ret != -ERESTARTSYS)
769 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
770 __func__, ret, seqno, i915_get_gem_seqno(dev));
771
772 /* Directly dispatch request retiring. While we have the work queue
773 * to handle this, the waiter on a request often wants an associated
774 * buffer to have made it to the inactive list, and we would need
775 * a separate wait queue to handle that.
776 */
777 if (ret == 0)
778 i915_gem_retire_requests(dev);
779
780 return ret;
781}
782
783static void
784i915_gem_flush(struct drm_device *dev,
785 uint32_t invalidate_domains,
786 uint32_t flush_domains)
787{
788 drm_i915_private_t *dev_priv = dev->dev_private;
789 uint32_t cmd;
790 RING_LOCALS;
791
792#if WATCH_EXEC
793 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
794 invalidate_domains, flush_domains);
795#endif
796
797 if (flush_domains & I915_GEM_DOMAIN_CPU)
798 drm_agp_chipset_flush(dev);
799
800 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
801 I915_GEM_DOMAIN_GTT)) {
802 /*
803 * read/write caches:
804 *
805 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
806 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
807 * also flushed at 2d versus 3d pipeline switches.
808 *
809 * read-only caches:
810 *
811 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
812 * MI_READ_FLUSH is set, and is always flushed on 965.
813 *
814 * I915_GEM_DOMAIN_COMMAND may not exist?
815 *
816 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
817 * invalidated when MI_EXE_FLUSH is set.
818 *
819 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
820 * invalidated with every MI_FLUSH.
821 *
822 * TLBs:
823 *
824 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
825 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
826 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
827 * are flushed at any MI_FLUSH.
828 */
829
830 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
831 if ((invalidate_domains|flush_domains) &
832 I915_GEM_DOMAIN_RENDER)
833 cmd &= ~MI_NO_WRITE_FLUSH;
834 if (!IS_I965G(dev)) {
835 /*
836 * On the 965, the sampler cache always gets flushed
837 * and this bit is reserved.
838 */
839 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
840 cmd |= MI_READ_FLUSH;
841 }
842 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
843 cmd |= MI_EXE_FLUSH;
844
845#if WATCH_EXEC
846 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
847#endif
848 BEGIN_LP_RING(2);
849 OUT_RING(cmd);
850 OUT_RING(0); /* noop */
851 ADVANCE_LP_RING();
852 }
853}
854
855/**
856 * Ensures that all rendering to the object has completed and the object is
857 * safe to unbind from the GTT or access from the CPU.
858 */
859static int
860i915_gem_object_wait_rendering(struct drm_gem_object *obj)
861{
862 struct drm_device *dev = obj->dev;
863 struct drm_i915_gem_object *obj_priv = obj->driver_private;
864 int ret;
865
866 /* If there are writes queued to the buffer, flush and
867 * create a new seqno to wait for.
868 */
869 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
870 uint32_t write_domain = obj->write_domain;
871#if WATCH_BUF
872 DRM_INFO("%s: flushing object %p from write domain %08x\n",
873 __func__, obj, write_domain);
874#endif
875 i915_gem_flush(dev, 0, write_domain);
876
877 i915_gem_object_move_to_active(obj);
878 obj_priv->last_rendering_seqno = i915_add_request(dev,
879 write_domain);
880 BUG_ON(obj_priv->last_rendering_seqno == 0);
881#if WATCH_LRU
882 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
883#endif
884 }
885
886 /* If there is rendering queued on the buffer being evicted, wait for
887 * it.
888 */
889 if (obj_priv->active) {
890#if WATCH_BUF
891 DRM_INFO("%s: object %p wait for seqno %08x\n",
892 __func__, obj, obj_priv->last_rendering_seqno);
893#endif
894 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
895 if (ret != 0)
896 return ret;
897 }
898
899 return 0;
900}
901
902/**
903 * Unbinds an object from the GTT aperture.
904 */
905static int
906i915_gem_object_unbind(struct drm_gem_object *obj)
907{
908 struct drm_device *dev = obj->dev;
909 struct drm_i915_gem_object *obj_priv = obj->driver_private;
910 int ret = 0;
911
912#if WATCH_BUF
913 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
914 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
915#endif
916 if (obj_priv->gtt_space == NULL)
917 return 0;
918
919 if (obj_priv->pin_count != 0) {
920 DRM_ERROR("Attempting to unbind pinned buffer\n");
921 return -EINVAL;
922 }
923
924 /* Wait for any rendering to complete
925 */
926 ret = i915_gem_object_wait_rendering(obj);
927 if (ret) {
928 DRM_ERROR("wait_rendering failed: %d\n", ret);
929 return ret;
930 }
931
932 /* Move the object to the CPU domain to ensure that
933 * any possible CPU writes while it's not in the GTT
934 * are flushed when we go to remap it. This will
935 * also ensure that all pending GPU writes are finished
936 * before we unbind.
937 */
938 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
939 I915_GEM_DOMAIN_CPU);
940 if (ret) {
941 DRM_ERROR("set_domain failed: %d\n", ret);
942 return ret;
943 }
944
945 if (obj_priv->agp_mem != NULL) {
946 drm_unbind_agp(obj_priv->agp_mem);
947 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
948 obj_priv->agp_mem = NULL;
949 }
950
951 BUG_ON(obj_priv->active);
952
953 i915_gem_object_free_page_list(obj);
954
955 if (obj_priv->gtt_space) {
956 atomic_dec(&dev->gtt_count);
957 atomic_sub(obj->size, &dev->gtt_memory);
958
959 drm_mm_put_block(obj_priv->gtt_space);
960 obj_priv->gtt_space = NULL;
961 }
962
963 /* Remove ourselves from the LRU list if present. */
964 if (!list_empty(&obj_priv->list))
965 list_del_init(&obj_priv->list);
966
967 return 0;
968}
969
970static int
971i915_gem_evict_something(struct drm_device *dev)
972{
973 drm_i915_private_t *dev_priv = dev->dev_private;
974 struct drm_gem_object *obj;
975 struct drm_i915_gem_object *obj_priv;
976 int ret = 0;
977
978 for (;;) {
979 /* If there's an inactive buffer available now, grab it
980 * and be done.
981 */
982 if (!list_empty(&dev_priv->mm.inactive_list)) {
983 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
984 struct drm_i915_gem_object,
985 list);
986 obj = obj_priv->obj;
987 BUG_ON(obj_priv->pin_count != 0);
988#if WATCH_LRU
989 DRM_INFO("%s: evicting %p\n", __func__, obj);
990#endif
991 BUG_ON(obj_priv->active);
992
993 /* Wait on the rendering and unbind the buffer. */
994 ret = i915_gem_object_unbind(obj);
995 break;
996 }
997
998 /* If we didn't get anything, but the ring is still processing
999 * things, wait for one of those things to finish and hopefully
1000 * leave us a buffer to evict.
1001 */
1002 if (!list_empty(&dev_priv->mm.request_list)) {
1003 struct drm_i915_gem_request *request;
1004
1005 request = list_first_entry(&dev_priv->mm.request_list,
1006 struct drm_i915_gem_request,
1007 list);
1008
1009 ret = i915_wait_request(dev, request->seqno);
1010 if (ret)
1011 break;
1012
1013 /* if waiting caused an object to become inactive,
1014 * then loop around and grab it. Otherwise, we
1015 * assume that waiting freed and unbound something,
1016 * so there should now be some space in the GTT
1017 */
1018 if (!list_empty(&dev_priv->mm.inactive_list))
1019 continue;
1020 break;
1021 }
1022
1023 /* If we didn't have anything on the request list but there
1024 * are buffers awaiting a flush, emit one and try again.
1025 * When we wait on it, those buffers waiting for that flush
1026 * will get moved to inactive.
1027 */
1028 if (!list_empty(&dev_priv->mm.flushing_list)) {
1029 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1030 struct drm_i915_gem_object,
1031 list);
1032 obj = obj_priv->obj;
1033
1034 i915_gem_flush(dev,
1035 obj->write_domain,
1036 obj->write_domain);
1037 i915_add_request(dev, obj->write_domain);
1038
1039 obj = NULL;
1040 continue;
1041 }
1042
1043 DRM_ERROR("inactive empty %d request empty %d "
1044 "flushing empty %d\n",
1045 list_empty(&dev_priv->mm.inactive_list),
1046 list_empty(&dev_priv->mm.request_list),
1047 list_empty(&dev_priv->mm.flushing_list));
1048 /* If we didn't do any of the above, there's nothing to be done
1049 * and we just can't fit it in.
1050 */
1051 return -ENOMEM;
1052 }
1053 return ret;
1054}
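/* Editor's note: a hedged trace of the eviction loop above. Suppose the
 * inactive list is empty but one request is outstanding:
 *
 *   pass 1: inactive empty -> wait on the oldest request; retiring it
 *           moves its buffers to the inactive (or flushing) list;
 *   pass 2: inactive non-empty -> unbind the first buffer and return.
 *
 * Only when all three lists are empty does the loop give up with -ENOMEM.
 */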
1055
1056static int
1057i915_gem_object_get_page_list(struct drm_gem_object *obj)
1058{
1059 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1060 int page_count, i;
1061 struct address_space *mapping;
1062 struct inode *inode;
1063 struct page *page;
1064 int ret;
1065
1066 if (obj_priv->page_list)
1067 return 0;
1068
1069 /* Get the list of pages out of our struct file. They'll be pinned
1070 * at this point until we release them.
1071 */
1072 page_count = obj->size / PAGE_SIZE;
1073 BUG_ON(obj_priv->page_list != NULL);
1074 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1075 DRM_MEM_DRIVER);
1076 if (obj_priv->page_list == NULL) {
1077		DRM_ERROR("Failed to allocate page list\n");
1078 return -ENOMEM;
1079 }
1080
1081 inode = obj->filp->f_path.dentry->d_inode;
1082 mapping = inode->i_mapping;
1083 for (i = 0; i < page_count; i++) {
1084 page = read_mapping_page(mapping, i, NULL);
1085 if (IS_ERR(page)) {
1086 ret = PTR_ERR(page);
1087 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1088 i915_gem_object_free_page_list(obj);
1089 return ret;
1090 }
1091 obj_priv->page_list[i] = page;
1092 }
1093 return 0;
1094}
1095
1096/**
1097 * Finds free space in the GTT aperture and binds the object there.
1098 */
1099static int
1100i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1101{
1102 struct drm_device *dev = obj->dev;
1103 drm_i915_private_t *dev_priv = dev->dev_private;
1104 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1105 struct drm_mm_node *free_space;
1106 int page_count, ret;
1107
1108 if (alignment == 0)
1109 alignment = PAGE_SIZE;
1110 if (alignment & (PAGE_SIZE - 1)) {
1111 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1112 return -EINVAL;
1113 }
1114
1115 search_free:
1116 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1117 obj->size, alignment, 0);
1118 if (free_space != NULL) {
1119 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
1120 alignment);
1121 if (obj_priv->gtt_space != NULL) {
1122 obj_priv->gtt_space->private = obj;
1123 obj_priv->gtt_offset = obj_priv->gtt_space->start;
1124 }
1125 }
1126 if (obj_priv->gtt_space == NULL) {
1127 /* If the gtt is empty and we're still having trouble
1128 * fitting our object in, we're out of memory.
1129 */
1130#if WATCH_LRU
1131 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1132#endif
1133 if (list_empty(&dev_priv->mm.inactive_list) &&
1134 list_empty(&dev_priv->mm.flushing_list) &&
1135 list_empty(&dev_priv->mm.active_list)) {
1136 DRM_ERROR("GTT full, but LRU list empty\n");
1137 return -ENOMEM;
1138 }
1139
1140 ret = i915_gem_evict_something(dev);
1141 if (ret != 0) {
1142 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1143 return ret;
1144 }
1145 goto search_free;
1146 }
1147
1148#if WATCH_BUF
1149 DRM_INFO("Binding object of size %d at 0x%08x\n",
1150 obj->size, obj_priv->gtt_offset);
1151#endif
1152 ret = i915_gem_object_get_page_list(obj);
1153 if (ret) {
1154 drm_mm_put_block(obj_priv->gtt_space);
1155 obj_priv->gtt_space = NULL;
1156 return ret;
1157 }
1158
1159 page_count = obj->size / PAGE_SIZE;
1160 /* Create an AGP memory structure pointing at our pages, and bind it
1161 * into the GTT.
1162 */
1163 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1164 obj_priv->page_list,
1165 page_count,
1166 obj_priv->gtt_offset,
1167 obj_priv->agp_type);
1168 if (obj_priv->agp_mem == NULL) {
1169 i915_gem_object_free_page_list(obj);
1170 drm_mm_put_block(obj_priv->gtt_space);
1171 obj_priv->gtt_space = NULL;
1172 return -ENOMEM;
1173 }
1174 atomic_inc(&dev->gtt_count);
1175 atomic_add(obj->size, &dev->gtt_memory);
1176
1177 /* Assert that the object is not currently in any GPU domain. As it
1178 * wasn't in the GTT, there shouldn't be any way it could have been in
1179 * a GPU cache
1180 */
1181 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1182 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1183
1184 return 0;
1185}
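/* Editor's note: illustrative numbers for the bind path above, assuming
 * PAGE_SIZE == 4096. A 12288-byte object with alignment == 0 searches
 * for a 12288-byte block at 4096-byte alignment; if drm_mm has no such
 * hole, one buffer is evicted and the goto retries the search, so the
 * function either binds the object or propagates the eviction error.
 */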
1186
1187void
1188i915_gem_clflush_object(struct drm_gem_object *obj)
1189{
1190 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1191
1192 /* If we don't have a page list set up, then we're not pinned
1193 * to GPU, and we can ignore the cache flush because it'll happen
1194 * again at bind time.
1195 */
1196 if (obj_priv->page_list == NULL)
1197 return;
1198
1199 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1200}
1201
1202/*
1203 * Set the next domain for the specified object. This
1204 * may not actually perform the necessary flushing/invalidating though,
1205 * as that may want to be batched with other set_domain operations
1206 *
1207 * This is (we hope) the only really tricky part of gem. The goal
1208 * is fairly simple -- track which caches hold bits of the object
1209 * and make sure they remain coherent. A few concrete examples may
1210 * help to explain how it works. For shorthand, we use the notation
1211 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
1212 * a pair of read and write domain masks.
1213 *
1214 * Case 1: the batch buffer
1215 *
1216 * 1. Allocated
1217 * 2. Written by CPU
1218 * 3. Mapped to GTT
1219 * 4. Read by GPU
1220 * 5. Unmapped from GTT
1221 * 6. Freed
1222 *
1223 * Let's take these a step at a time
1224 *
1225 * 1. Allocated
1226 * Pages allocated from the kernel may still have
1227 * cache contents, so we set them to (CPU, CPU) always.
1228 * 2. Written by CPU (using pwrite)
1229 * The pwrite function calls set_domain (CPU, CPU) and
1230 * this function does nothing (as nothing changes)
1231 * 3. Mapped to GTT
1232 * This function asserts that the object is not
1233 * currently in any GPU-based read or write domains
1234 * 4. Read by GPU
1235 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
1236 * As write_domain is zero, this function adds in the
1237 * current read domains (CPU+COMMAND, 0).
1238 * flush_domains is set to CPU.
1239 * invalidate_domains is set to COMMAND
1240 * clflush is run to get data out of the CPU caches
1241 * then i915_dev_set_domain calls i915_gem_flush to
1242 * emit an MI_FLUSH and drm_agp_chipset_flush
1243 * 5. Unmapped from GTT
1244 * i915_gem_object_unbind calls set_domain (CPU, CPU)
1245 * flush_domains and invalidate_domains end up both zero
1246 * so no flushing/invalidating happens
1247 * 6. Freed
1248 * yay, done
1249 *
1250 * Case 2: The shared render buffer
1251 *
1252 * 1. Allocated
1253 * 2. Mapped to GTT
1254 * 3. Read/written by GPU
1255 * 4. set_domain to (CPU,CPU)
1256 * 5. Read/written by CPU
1257 * 6. Read/written by GPU
1258 *
1259 * 1. Allocated
1260 * Same as last example, (CPU, CPU)
1261 * 2. Mapped to GTT
1262 * Nothing changes (assertions find that it is not in the GPU)
1263 * 3. Read/written by GPU
1264 * execbuffer calls set_domain (RENDER, RENDER)
1265 * flush_domains gets CPU
1266 * invalidate_domains gets GPU
1267 * clflush (obj)
1268 * MI_FLUSH and drm_agp_chipset_flush
1269 * 4. set_domain (CPU, CPU)
1270 * flush_domains gets GPU
1271 * invalidate_domains gets CPU
1272 * wait_rendering (obj) to make sure all drawing is complete.
1273 * This will include an MI_FLUSH to get the data from GPU
1274 * to memory
1275 * clflush (obj) to invalidate the CPU cache
1276 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1277 * 5. Read/written by CPU
1278 * cache lines are loaded and dirtied
1279 * 6. Read/written by GPU
1280 * Same as last GPU access
1281 *
1282 * Case 3: The constant buffer
1283 *
1284 * 1. Allocated
1285 * 2. Written by CPU
1286 * 3. Read by GPU
1287 * 4. Updated (written) by CPU again
1288 * 5. Read by GPU
1289 *
1290 * 1. Allocated
1291 * (CPU, CPU)
1292 * 2. Written by CPU
1293 * (CPU, CPU)
1294 * 3. Read by GPU
1295 * (CPU+RENDER, 0)
1296 * flush_domains = CPU
1297 * invalidate_domains = RENDER
1298 * clflush (obj)
1299 * MI_FLUSH
1300 * drm_agp_chipset_flush
1301 * 4. Updated (written) by CPU again
1302 * (CPU, CPU)
1303 * flush_domains = 0 (no previous write domain)
1304 * invalidate_domains = 0 (no new read domains)
1305 * 5. Read by GPU
1306 * (CPU+RENDER, 0)
1307 * flush_domains = CPU
1308 * invalidate_domains = RENDER
1309 * clflush (obj)
1310 * MI_FLUSH
1311 * drm_agp_chipset_flush
1312 */
1313static int
1314i915_gem_object_set_domain(struct drm_gem_object *obj,
1315 uint32_t read_domains,
1316 uint32_t write_domain)
1317{
1318 struct drm_device *dev = obj->dev;
1319 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1320 uint32_t invalidate_domains = 0;
1321 uint32_t flush_domains = 0;
1322 int ret;
1323
1324#if WATCH_BUF
1325 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1326 __func__, obj,
1327 obj->read_domains, read_domains,
1328 obj->write_domain, write_domain);
1329#endif
1330 /*
1331 * If the object isn't moving to a new write domain,
1332 * let the object stay in multiple read domains
1333 */
1334 if (write_domain == 0)
1335 read_domains |= obj->read_domains;
1336 else
1337 obj_priv->dirty = 1;
1338
1339 /*
1340 * Flush the current write domain if
1341 * the new read domains don't match. Invalidate
1342 * any read domains which differ from the old
1343 * write domain
1344 */
1345 if (obj->write_domain && obj->write_domain != read_domains) {
1346 flush_domains |= obj->write_domain;
1347 invalidate_domains |= read_domains & ~obj->write_domain;
1348 }
1349 /*
1350 * Invalidate any read caches which may have
1351 * stale data. That is, any new read domains.
1352 */
1353 invalidate_domains |= read_domains & ~obj->read_domains;
1354 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1355#if WATCH_BUF
1356 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1357 __func__, flush_domains, invalidate_domains);
1358#endif
1359 /*
1360 * If we're invalidating the CPU cache and flushing a GPU cache,
1361 * then pause for rendering so that the GPU caches will be
1362 * flushed before the cpu cache is invalidated
1363 */
1364 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1365 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1366 I915_GEM_DOMAIN_GTT))) {
1367 ret = i915_gem_object_wait_rendering(obj);
1368 if (ret)
1369 return ret;
1370 }
1371 i915_gem_clflush_object(obj);
1372 }
1373
1374 if ((write_domain | flush_domains) != 0)
1375 obj->write_domain = write_domain;
1376
1377 /* If we're invalidating the CPU domain, clear the per-page CPU
1378 * domain list as well.
1379 */
1380 if (obj_priv->page_cpu_valid != NULL &&
1381 (write_domain != 0 ||
1382 read_domains & I915_GEM_DOMAIN_CPU)) {
1383 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1384 DRM_MEM_DRIVER);
1385 obj_priv->page_cpu_valid = NULL;
1386 }
1387 obj->read_domains = read_domains;
1388
1389 dev->invalidate_domains |= invalidate_domains;
1390 dev->flush_domains |= flush_domains;
1391#if WATCH_BUF
1392 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1393 __func__,
1394 obj->read_domains, obj->write_domain,
1395 dev->invalidate_domains, dev->flush_domains);
1396#endif
1397 return 0;
1398}
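/* Editor's note: a worked pass through the mask logic above, following
 * Case 1 step 4 of the comment block (values illustrative):
 *
 *   obj:  read_domains == CPU, write_domain == CPU
 *   call: set_domain(obj, COMMAND, 0)
 *
 *   write_domain == 0          -> read_domains |= CPU (now CPU+COMMAND)
 *   old CPU write != new reads -> flush_domains = CPU,
 *                                 invalidate_domains |= COMMAND
 *   new read domains           -> invalidate_domains |= COMMAND
 *
 * leaving flush == CPU and invalidate == COMMAND; the CPU branch then
 * runs clflush, as the narrative above describes.
 */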
1399
1400/**
1401 * Set the read/write domain on a range of the object.
1402 *
1403 * Currently only implemented for CPU reads, otherwise drops to normal
1404 * i915_gem_object_set_domain().
1405 */
1406static int
1407i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1408 uint64_t offset,
1409 uint64_t size,
1410 uint32_t read_domains,
1411 uint32_t write_domain)
1412{
1413 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1414 int ret, i;
1415
1416 if (obj->read_domains & I915_GEM_DOMAIN_CPU)
1417 return 0;
1418
1419 if (read_domains != I915_GEM_DOMAIN_CPU ||
1420 write_domain != 0)
1421 return i915_gem_object_set_domain(obj,
1422 read_domains, write_domain);
1423
1424 /* Wait on any GPU rendering to the object to be flushed. */
1425 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
1426 ret = i915_gem_object_wait_rendering(obj);
1427 if (ret)
1428 return ret;
1429 }
1430
1431 if (obj_priv->page_cpu_valid == NULL) {
1432 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1433 DRM_MEM_DRIVER);
1434 }
1435
1436 /* Flush the cache on any pages that are still invalid from the CPU's
1437 * perspective.
1438 */
1439 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
1440 if (obj_priv->page_cpu_valid[i])
1441 continue;
1442
1443 drm_clflush_pages(obj_priv->page_list + i, 1);
1444
1445 obj_priv->page_cpu_valid[i] = 1;
1446 }
1447
1448 return 0;
1449}
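/* Editor's note: illustrative bounds for the per-page loop above,
 * assuming PAGE_SIZE == 4096:
 *
 *   offset == 4096, size == 8192
 *     -> i runs from 4096/4096 == 1 to (4096 + 8192 - 1)/4096 == 2,
 *
 * i.e. exactly the two pages the byte range touches are clflushed and
 * marked valid in page_cpu_valid.
 */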
1450
1451/**
1452 * Once all of the objects have been set in the proper domain,
1453 * perform the necessary flush and invalidate operations.
1454 *
1455 * Returns the write domains flushed, for use in flush tracking.
1456 */
1457static uint32_t
1458i915_gem_dev_set_domain(struct drm_device *dev)
1459{
1460 uint32_t flush_domains = dev->flush_domains;
1461
1462 /*
1463 * Now that all the buffers are synced to the proper domains,
1464 * flush and invalidate the collected domains
1465 */
1466 if (dev->invalidate_domains | dev->flush_domains) {
1467#if WATCH_EXEC
1468 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1469 __func__,
1470 dev->invalidate_domains,
1471 dev->flush_domains);
1472#endif
1473 i915_gem_flush(dev,
1474 dev->invalidate_domains,
1475 dev->flush_domains);
1476 dev->invalidate_domains = 0;
1477 dev->flush_domains = 0;
1478 }
1479
1480 return flush_domains;
1481}
1482
1483/**
1484 * Pin an object to the GTT and evaluate the relocations landing in it.
1485 */
1486static int
1487i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1488 struct drm_file *file_priv,
1489 struct drm_i915_gem_exec_object *entry)
1490{
1491 struct drm_device *dev = obj->dev;
1492 struct drm_i915_gem_relocation_entry reloc;
1493 struct drm_i915_gem_relocation_entry __user *relocs;
1494 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1495 int i, ret;
1496 uint32_t last_reloc_offset = -1;
1497 void __iomem *reloc_page = NULL;
1498
1499 /* Choose the GTT offset for our buffer and put it there. */
1500 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1501 if (ret)
1502 return ret;
1503
1504 entry->offset = obj_priv->gtt_offset;
1505
1506 relocs = (struct drm_i915_gem_relocation_entry __user *)
1507 (uintptr_t) entry->relocs_ptr;
1508 /* Apply the relocations, using the GTT aperture to avoid cache
1509 * flushing requirements.
1510 */
1511 for (i = 0; i < entry->relocation_count; i++) {
1512 struct drm_gem_object *target_obj;
1513 struct drm_i915_gem_object *target_obj_priv;
1514 uint32_t reloc_val, reloc_offset;
1515 uint32_t __iomem *reloc_entry;
1516
1517 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1518 if (ret != 0) {
1519 i915_gem_object_unpin(obj);
1520 return ret;
1521 }
1522
1523 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1524 reloc.target_handle);
1525 if (target_obj == NULL) {
1526 i915_gem_object_unpin(obj);
1527 return -EBADF;
1528 }
1529 target_obj_priv = target_obj->driver_private;
1530
1531 /* The target buffer should have appeared before us in the
1532 * exec_object list, so it should have a GTT space bound by now.
1533 */
1534 if (target_obj_priv->gtt_space == NULL) {
1535 DRM_ERROR("No GTT space found for object %d\n",
1536 reloc.target_handle);
1537 drm_gem_object_unreference(target_obj);
1538 i915_gem_object_unpin(obj);
1539 return -EINVAL;
1540 }
1541
1542 if (reloc.offset > obj->size - 4) {
1543 DRM_ERROR("Relocation beyond object bounds: "
1544 "obj %p target %d offset %d size %d.\n",
1545 obj, reloc.target_handle,
1546 (int) reloc.offset, (int) obj->size);
1547 drm_gem_object_unreference(target_obj);
1548 i915_gem_object_unpin(obj);
1549 return -EINVAL;
1550 }
1551 if (reloc.offset & 3) {
1552 DRM_ERROR("Relocation not 4-byte aligned: "
1553 "obj %p target %d offset %d.\n",
1554 obj, reloc.target_handle,
1555 (int) reloc.offset);
1556 drm_gem_object_unreference(target_obj);
1557 i915_gem_object_unpin(obj);
1558 return -EINVAL;
1559 }
1560
1561 if (reloc.write_domain && target_obj->pending_write_domain &&
1562 reloc.write_domain != target_obj->pending_write_domain) {
1563 DRM_ERROR("Write domain conflict: "
1564 "obj %p target %d offset %d "
1565 "new %08x old %08x\n",
1566 obj, reloc.target_handle,
1567 (int) reloc.offset,
1568 reloc.write_domain,
1569 target_obj->pending_write_domain);
1570 drm_gem_object_unreference(target_obj);
1571 i915_gem_object_unpin(obj);
1572 return -EINVAL;
1573 }
1574
1575#if WATCH_RELOC
1576 DRM_INFO("%s: obj %p offset %08x target %d "
1577 "read %08x write %08x gtt %08x "
1578 "presumed %08x delta %08x\n",
1579 __func__,
1580 obj,
1581 (int) reloc.offset,
1582 (int) reloc.target_handle,
1583 (int) reloc.read_domains,
1584 (int) reloc.write_domain,
1585 (int) target_obj_priv->gtt_offset,
1586 (int) reloc.presumed_offset,
1587 reloc.delta);
1588#endif
1589
1590 target_obj->pending_read_domains |= reloc.read_domains;
1591 target_obj->pending_write_domain |= reloc.write_domain;
1592
1593 /* If the relocation already has the right value in it, no
1594 * more work needs to be done.
1595 */
1596 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1597 drm_gem_object_unreference(target_obj);
1598 continue;
1599 }
1600
1601 /* Now that we're going to actually write some data in,
1602 * make sure that any rendering using this buffer's contents
1603 * is completed.
1604 */
1605 i915_gem_object_wait_rendering(obj);
1606
1607 /* As we're writing through the gtt, flush
1608 * any CPU writes before we write the relocations
1609 */
1610 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1611 i915_gem_clflush_object(obj);
1612 drm_agp_chipset_flush(dev);
1613 obj->write_domain = 0;
1614 }
1615
1616 /* Map the page containing the relocation we're going to
1617 * perform.
1618 */
1619 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1620 if (reloc_page == NULL ||
1621 (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
1622 (reloc_offset & ~(PAGE_SIZE - 1))) {
1623 if (reloc_page != NULL)
1624 iounmap(reloc_page);
1625
1626 reloc_page = ioremap_wc(dev->agp->base +
1627 (reloc_offset &
1628 ~(PAGE_SIZE - 1)),
1629 PAGE_SIZE);
1630 last_reloc_offset = reloc_offset;
1631 if (reloc_page == NULL) {
1632 drm_gem_object_unreference(target_obj);
1633 i915_gem_object_unpin(obj);
1634 return -ENOMEM;
1635 }
1636 }
1637
1638 reloc_entry = (uint32_t __iomem *)(reloc_page +
1639 (reloc_offset & (PAGE_SIZE - 1)));
1640 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1641
1642#if WATCH_BUF
1643 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1644 obj, (unsigned int) reloc.offset,
1645 readl(reloc_entry), reloc_val);
1646#endif
1647 writel(reloc_val, reloc_entry);
1648
1649 /* Write the updated presumed offset for this entry back out
1650 * to the user.
1651 */
1652 reloc.presumed_offset = target_obj_priv->gtt_offset;
1653 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1654 if (ret != 0) {
1655 drm_gem_object_unreference(target_obj);
1656 i915_gem_object_unpin(obj);
1657 return ret;
1658 }
1659
1660 drm_gem_object_unreference(target_obj);
1661 }
1662
1663 if (reloc_page != NULL)
1664 iounmap(reloc_page);
1665
1666#if WATCH_BUF
1667 if (0)
1668 i915_gem_dump_object(obj, 128, __func__, ~0);
1669#endif
1670 return 0;
1671}
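/* Editor's note: a hedged example of one relocation write above, with
 * illustrative offsets. If the target object sits at gtt_offset 0x20000
 * and reloc.delta is 0x10, then reloc_val == 0x20010 is written through
 * the write-combining GTT mapping at (obj gtt_offset + reloc.offset),
 * and 0x20000 is copied back to userspace as the new presumed_offset.
 */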
1672
1673/** Dispatch a batchbuffer to the ring
1674 */
1675static int
1676i915_dispatch_gem_execbuffer(struct drm_device *dev,
1677 struct drm_i915_gem_execbuffer *exec,
1678 uint64_t exec_offset)
1679{
1680 drm_i915_private_t *dev_priv = dev->dev_private;
1681 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1682 (uintptr_t) exec->cliprects_ptr;
1683 int nbox = exec->num_cliprects;
1684 int i = 0, count;
1685 uint32_t exec_start, exec_len;
1686 RING_LOCALS;
1687
1688 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1689 exec_len = (uint32_t) exec->batch_len;
1690
1691 if ((exec_start | exec_len) & 0x7) {
1692 DRM_ERROR("alignment\n");
1693 return -EINVAL;
1694 }
1695
1696 if (!exec_start)
1697 return -EINVAL;
1698
1699 count = nbox ? nbox : 1;
1700
1701 for (i = 0; i < count; i++) {
1702 if (i < nbox) {
1703 int ret = i915_emit_box(dev, boxes, i,
1704 exec->DR1, exec->DR4);
1705 if (ret)
1706 return ret;
1707 }
1708
1709 if (IS_I830(dev) || IS_845G(dev)) {
1710 BEGIN_LP_RING(4);
1711 OUT_RING(MI_BATCH_BUFFER);
1712 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1713 OUT_RING(exec_start + exec_len - 4);
1714 OUT_RING(0);
1715 ADVANCE_LP_RING();
1716 } else {
1717 BEGIN_LP_RING(2);
1718 if (IS_I965G(dev)) {
1719 OUT_RING(MI_BATCH_BUFFER_START |
1720 (2 << 6) |
1721 MI_BATCH_NON_SECURE_I965);
1722 OUT_RING(exec_start);
1723 } else {
1724 OUT_RING(MI_BATCH_BUFFER_START |
1725 (2 << 6));
1726 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1727 }
1728 ADVANCE_LP_RING();
1729 }
1730 }
1731
1732 /* XXX breadcrumb */
1733 return 0;
1734}
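/* Editor's note: illustrative addresses for the dispatch above. With a
 * batch pinned at GTT offset 0x10000, batch_start_offset 0 and batch_len
 * 0x100, the i830/845 path emits MI_BATCH_BUFFER with start 0x10000 and
 * end 0x10000 + 0x100 - 4 == 0x100fc (the last dword), while later chips
 * emit MI_BATCH_BUFFER_START with the start address alone.
 */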
1735
1736/* Throttle our rendering by waiting until the ring has completed our requests
1737 * emitted over 20 msec ago.
1738 *
1739 * This should get us reasonable parallelism between CPU and GPU but also
1740 * relatively low latency when blocking on a particular request to finish.
1741 */
1742static int
1743i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1744{
1745 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1746 int ret = 0;
1747 uint32_t seqno;
1748
1749 mutex_lock(&dev->struct_mutex);
1750 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1751 i915_file_priv->mm.last_gem_throttle_seqno =
1752 i915_file_priv->mm.last_gem_seqno;
1753 if (seqno)
1754 ret = i915_wait_request(dev, seqno);
1755 mutex_unlock(&dev->struct_mutex);
1756 return ret;
1757}
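/* Editor's note: a sketch of the throttle above, frame numbers
 * illustrative. Each call waits on the seqno saved by the *previous*
 * call and then records the most recent execbuffer seqno:
 *
 *   frame N:   wait on seqno(frame N-1), save seqno(frame N)
 *   frame N+1: wait on seqno(frame N),   save seqno(frame N+1)
 *
 * so a client throttling once per frame runs at most one frame ahead of
 * the GPU.
 */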
1758
1759int
1760i915_gem_execbuffer(struct drm_device *dev, void *data,
1761 struct drm_file *file_priv)
1762{
1763 drm_i915_private_t *dev_priv = dev->dev_private;
1764 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1765 struct drm_i915_gem_execbuffer *args = data;
1766 struct drm_i915_gem_exec_object *exec_list = NULL;
1767 struct drm_gem_object **object_list = NULL;
1768 struct drm_gem_object *batch_obj;
1769 int ret, i, pinned = 0;
1770 uint64_t exec_offset;
1771 uint32_t seqno, flush_domains;
1772
1773#if WATCH_EXEC
1774 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1775 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1776#endif
1777
1778 if (args->buffer_count < 1) {
1779 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1780 return -EINVAL;
1781 }
1782 /* Copy in the exec list from userland */
1783 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1784 DRM_MEM_DRIVER);
1785 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1786 DRM_MEM_DRIVER);
1787 if (exec_list == NULL || object_list == NULL) {
1788 DRM_ERROR("Failed to allocate exec or object list "
1789 "for %d buffers\n",
1790 args->buffer_count);
1791 ret = -ENOMEM;
1792 goto pre_mutex_err;
1793 }
1794 ret = copy_from_user(exec_list,
1795 (struct drm_i915_relocation_entry __user *)
1796 (uintptr_t) args->buffers_ptr,
1797 sizeof(*exec_list) * args->buffer_count);
1798 if (ret != 0) {
1799 DRM_ERROR("copy %d exec entries failed %d\n",
1800 args->buffer_count, ret);
1801 goto pre_mutex_err;
1802 }
1803
1804 mutex_lock(&dev->struct_mutex);
1805
1806 i915_verify_inactive(dev, __FILE__, __LINE__);
1807
1808 if (dev_priv->mm.wedged) {
1809 DRM_ERROR("Execbuf while wedged\n");
1810 mutex_unlock(&dev->struct_mutex);
1811 return -EIO;
1812 }
1813
1814 if (dev_priv->mm.suspended) {
1815 DRM_ERROR("Execbuf while VT-switched.\n");
1816 mutex_unlock(&dev->struct_mutex);
1817 return -EBUSY;
1818 }
1819
1820	/* Zero the global flush/invalidate flags. These
1821 * will be modified as each object is bound to the
1822 * gtt
1823 */
1824 dev->invalidate_domains = 0;
1825 dev->flush_domains = 0;
1826
1827 /* Look up object handles and perform the relocations */
1828 for (i = 0; i < args->buffer_count; i++) {
1829 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1830 exec_list[i].handle);
1831 if (object_list[i] == NULL) {
1832 DRM_ERROR("Invalid object handle %d at index %d\n",
1833 exec_list[i].handle, i);
1834 ret = -EBADF;
1835 goto err;
1836 }
1837
1838 object_list[i]->pending_read_domains = 0;
1839 object_list[i]->pending_write_domain = 0;
1840 ret = i915_gem_object_pin_and_relocate(object_list[i],
1841 file_priv,
1842 &exec_list[i]);
1843 if (ret) {
1844 DRM_ERROR("object bind and relocate failed %d\n", ret);
1845 goto err;
1846 }
1847 pinned = i + 1;
1848 }
1849
1850 /* Set the pending read domains for the batch buffer to COMMAND */
1851 batch_obj = object_list[args->buffer_count-1];
1852 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1853 batch_obj->pending_write_domain = 0;
1854
1855 i915_verify_inactive(dev, __FILE__, __LINE__);
1856
1857 for (i = 0; i < args->buffer_count; i++) {
1858 struct drm_gem_object *obj = object_list[i];
1859 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1860
1861 if (obj_priv->gtt_space == NULL) {
1862			/* We evicted the buffer in the process of validating
1863			 * our set of buffers. We could try to recover by
1864			 * kicking everything out and trying again from
1865			 * the start.
1866 */
1867 ret = -ENOMEM;
1868 goto err;
1869 }
1870
1871 /* make sure all previous memory operations have passed */
1872 ret = i915_gem_object_set_domain(obj,
1873 obj->pending_read_domains,
1874 obj->pending_write_domain);
1875 if (ret)
1876 goto err;
1877 }
1878
1879 i915_verify_inactive(dev, __FILE__, __LINE__);
1880
1881 /* Flush/invalidate caches and chipset buffer */
1882 flush_domains = i915_gem_dev_set_domain(dev);
1883
1884 i915_verify_inactive(dev, __FILE__, __LINE__);
1885
1886#if WATCH_COHERENCY
1887 for (i = 0; i < args->buffer_count; i++) {
1888 i915_gem_object_check_coherency(object_list[i],
1889 exec_list[i].handle);
1890 }
1891#endif
1892
1893 exec_offset = exec_list[args->buffer_count - 1].offset;
1894
1895#if WATCH_EXEC
1896 i915_gem_dump_object(object_list[args->buffer_count - 1],
1897 args->batch_len,
1898 __func__,
1899 ~0);
1900#endif
1901
1902 (void)i915_add_request(dev, flush_domains);
1903
1904 /* Exec the batchbuffer */
1905 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1906 if (ret) {
1907 DRM_ERROR("dispatch failed %d\n", ret);
1908 goto err;
1909 }
1910
1911 /*
1912 * Ensure that the commands in the batch buffer are
1913 * finished before the interrupt fires
1914 */
1915 flush_domains = i915_retire_commands(dev);
1916
1917 i915_verify_inactive(dev, __FILE__, __LINE__);
1918
1919 /*
1920 * Get a seqno representing the execution of the current buffer,
1921 * which we can wait on. We would like to mitigate these interrupts,
1922 * likely by only creating seqnos occasionally (so that we have
1923 * *some* interrupts representing completion of buffers that we can
1924 * wait on when trying to clear up gtt space).
1925 */
1926 seqno = i915_add_request(dev, flush_domains);
1927 BUG_ON(seqno == 0);
1928 i915_file_priv->mm.last_gem_seqno = seqno;
1929 for (i = 0; i < args->buffer_count; i++) {
1930 struct drm_gem_object *obj = object_list[i];
1931 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1932
1933 i915_gem_object_move_to_active(obj);
1934 obj_priv->last_rendering_seqno = seqno;
1935#if WATCH_LRU
1936 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1937#endif
1938 }
1939#if WATCH_LRU
1940 i915_dump_lru(dev, __func__);
1941#endif
1942
1943 i915_verify_inactive(dev, __FILE__, __LINE__);
1944
1945 /* Copy the new buffer offsets back to the user's exec list. */
1946 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1947 (uintptr_t) args->buffers_ptr,
1948 exec_list,
1949 sizeof(*exec_list) * args->buffer_count);
1950 if (ret)
1951 DRM_ERROR("failed to copy %d exec entries "
1952 "back to user (%d)\n",
1953 args->buffer_count, ret);
1954err:
1955 if (object_list != NULL) {
1956 for (i = 0; i < pinned; i++)
1957 i915_gem_object_unpin(object_list[i]);
1958
1959 for (i = 0; i < args->buffer_count; i++)
1960 drm_gem_object_unreference(object_list[i]);
1961 }
1962 mutex_unlock(&dev->struct_mutex);
1963
1964pre_mutex_err:
1965 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1966 DRM_MEM_DRIVER);
1967 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
1968 DRM_MEM_DRIVER);
1969
1970 return ret;
1971}
1972
1973int
1974i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1975{
1976 struct drm_device *dev = obj->dev;
1977 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1978 int ret;
1979
1980 i915_verify_inactive(dev, __FILE__, __LINE__);
1981 if (obj_priv->gtt_space == NULL) {
1982 ret = i915_gem_object_bind_to_gtt(obj, alignment);
1983 if (ret != 0) {
1984			DRM_ERROR("Failure to bind: %d\n", ret);
1985 return ret;
1986 }
1987 }
1988 obj_priv->pin_count++;
1989
1990 /* If the object is not active and not pending a flush,
1991 * remove it from the inactive list
1992 */
1993 if (obj_priv->pin_count == 1) {
1994 atomic_inc(&dev->pin_count);
1995 atomic_add(obj->size, &dev->pin_memory);
1996 if (!obj_priv->active &&
1997 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
1998 I915_GEM_DOMAIN_GTT)) == 0 &&
1999 !list_empty(&obj_priv->list))
2000 list_del_init(&obj_priv->list);
2001 }
2002 i915_verify_inactive(dev, __FILE__, __LINE__);
2003
2004 return 0;
2005}
2006
2007void
2008i915_gem_object_unpin(struct drm_gem_object *obj)
2009{
2010 struct drm_device *dev = obj->dev;
2011 drm_i915_private_t *dev_priv = dev->dev_private;
2012 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2013
2014 i915_verify_inactive(dev, __FILE__, __LINE__);
2015 obj_priv->pin_count--;
2016 BUG_ON(obj_priv->pin_count < 0);
2017 BUG_ON(obj_priv->gtt_space == NULL);
2018
2019 /* If the object is no longer pinned, and is
2020 * neither active nor being flushed, then stick it on
2021 * the inactive list
2022 */
2023 if (obj_priv->pin_count == 0) {
2024 if (!obj_priv->active &&
2025 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2026 I915_GEM_DOMAIN_GTT)) == 0)
2027 list_move_tail(&obj_priv->list,
2028 &dev_priv->mm.inactive_list);
2029 atomic_dec(&dev->pin_count);
2030 atomic_sub(obj->size, &dev->pin_memory);
2031 }
2032 i915_verify_inactive(dev, __FILE__, __LINE__);
2033}
2034
2035int
2036i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2037 struct drm_file *file_priv)
2038{
2039 struct drm_i915_gem_pin *args = data;
2040 struct drm_gem_object *obj;
2041 struct drm_i915_gem_object *obj_priv;
2042 int ret;
2043
2044 mutex_lock(&dev->struct_mutex);
2045
2046 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2047 if (obj == NULL) {
2048 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2049 args->handle);
2050 mutex_unlock(&dev->struct_mutex);
2051 return -EBADF;
2052 }
2053 obj_priv = obj->driver_private;
2054
2055 ret = i915_gem_object_pin(obj, args->alignment);
2056 if (ret != 0) {
2057 drm_gem_object_unreference(obj);
2058 mutex_unlock(&dev->struct_mutex);
2059 return ret;
2060 }
2061
2062 /* XXX - flush the CPU caches for pinned objects
2063 * as the X server doesn't manage domains yet
2064 */
2065 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2066 i915_gem_clflush_object(obj);
2067 drm_agp_chipset_flush(dev);
2068 obj->write_domain = 0;
2069 }
2070 args->offset = obj_priv->gtt_offset;
2071 drm_gem_object_unreference(obj);
2072 mutex_unlock(&dev->struct_mutex);
2073
2074 return 0;
2075}
2076
2077int
2078i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2079 struct drm_file *file_priv)
2080{
2081 struct drm_i915_gem_pin *args = data;
2082 struct drm_gem_object *obj;
2083
2084 mutex_lock(&dev->struct_mutex);
2085
2086 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2087 if (obj == NULL) {
2088 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2089 args->handle);
2090 mutex_unlock(&dev->struct_mutex);
2091 return -EBADF;
2092 }
2093
2094 i915_gem_object_unpin(obj);
2095
2096 drm_gem_object_unreference(obj);
2097 mutex_unlock(&dev->struct_mutex);
2098 return 0;
2099}
2100
2101int
2102i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2103 struct drm_file *file_priv)
2104{
2105 struct drm_i915_gem_busy *args = data;
2106 struct drm_gem_object *obj;
2107 struct drm_i915_gem_object *obj_priv;
2108
2109 mutex_lock(&dev->struct_mutex);
2110 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2111 if (obj == NULL) {
2112 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2113 args->handle);
2114 mutex_unlock(&dev->struct_mutex);
2115 return -EBADF;
2116 }
2117
2118 obj_priv = obj->driver_private;
2119 args->busy = obj_priv->active;
2120
2121 drm_gem_object_unreference(obj);
2122 mutex_unlock(&dev->struct_mutex);
2123 return 0;
2124}
2125
2126int
2127i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2128 struct drm_file *file_priv)
2129{
2130 return i915_gem_ring_throttle(dev, file_priv);
2131}
2132
2133int i915_gem_init_object(struct drm_gem_object *obj)
2134{
2135 struct drm_i915_gem_object *obj_priv;
2136
2137 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2138 if (obj_priv == NULL)
2139 return -ENOMEM;
2140
2141 /*
2142 * We've just allocated pages from the kernel,
2143 * so they've just been written by the CPU with
2144 * zeros. They'll need to be clflushed before we
2145 * use them with the GPU.
2146 */
2147 obj->write_domain = I915_GEM_DOMAIN_CPU;
2148 obj->read_domains = I915_GEM_DOMAIN_CPU;
2149
2150 obj_priv->agp_type = AGP_USER_MEMORY;
2151
2152 obj->driver_private = obj_priv;
2153 obj_priv->obj = obj;
2154 INIT_LIST_HEAD(&obj_priv->list);
2155 return 0;
2156}
2157
2158void i915_gem_free_object(struct drm_gem_object *obj)
2159{
2160 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2161
2162 while (obj_priv->pin_count > 0)
2163 i915_gem_object_unpin(obj);
2164
2165 i915_gem_object_unbind(obj);
2166
2167 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2168 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2169}
2170
2171static int
2172i915_gem_set_domain(struct drm_gem_object *obj,
2173 struct drm_file *file_priv,
2174 uint32_t read_domains,
2175 uint32_t write_domain)
2176{
2177 struct drm_device *dev = obj->dev;
2178 int ret;
2179 uint32_t flush_domains;
2180
2181 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2182
2183 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2184 if (ret)
2185 return ret;
2186 flush_domains = i915_gem_dev_set_domain(obj->dev);
2187
2188 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2189 (void) i915_add_request(dev, flush_domains);
2190
2191 return 0;
2192}
2193
2194/** Unbinds all objects that are on the given buffer list. */
2195static int
2196i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2197{
2198 struct drm_gem_object *obj;
2199 struct drm_i915_gem_object *obj_priv;
2200 int ret;
2201
2202 while (!list_empty(head)) {
2203 obj_priv = list_first_entry(head,
2204 struct drm_i915_gem_object,
2205 list);
2206 obj = obj_priv->obj;
2207
2208 if (obj_priv->pin_count != 0) {
2209 DRM_ERROR("Pinned object in unbind list\n");
2210 mutex_unlock(&dev->struct_mutex);
2211 return -EINVAL;
2212 }
2213
2214 ret = i915_gem_object_unbind(obj);
2215 if (ret != 0) {
2216 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2217 ret);
2218 mutex_unlock(&dev->struct_mutex);
2219 return ret;
2220 }
2221 }
2222
2223
2224 return 0;
2225}
2226
2227static int
2228i915_gem_idle(struct drm_device *dev)
2229{
2230 drm_i915_private_t *dev_priv = dev->dev_private;
2231 uint32_t seqno, cur_seqno, last_seqno;
2232 int stuck, ret;
2233
2234 mutex_lock(&dev->struct_mutex);
2235
2236 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
2237 mutex_unlock(&dev->struct_mutex);
2238 return 0;
2239 }
2240
2241 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2242 * We need to replace this with a semaphore, or something.
2243 */
2244 dev_priv->mm.suspended = 1;
2245
2246 /* Cancel the retire work handler, wait for it to finish if running
2247 */
2248 mutex_unlock(&dev->struct_mutex);
2249 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2250 mutex_lock(&dev->struct_mutex);
2251
2252 i915_kernel_lost_context(dev);
2253
2254 /* Flush the GPU along with all non-CPU write domains
2255 */
2256 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2257 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2258 seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2259 I915_GEM_DOMAIN_GTT));
2260
2261 if (seqno == 0) {
2262 mutex_unlock(&dev->struct_mutex);
2263 return -ENOMEM;
2264 }
2265
2266 dev_priv->mm.waiting_gem_seqno = seqno;
2267 last_seqno = 0;
2268 stuck = 0;
2269 for (;;) {
2270 cur_seqno = i915_get_gem_seqno(dev);
2271 if (i915_seqno_passed(cur_seqno, seqno))
2272 break;
2273 if (last_seqno == cur_seqno) {
2274 if (stuck++ > 100) {
2275 DRM_ERROR("hardware wedged\n");
2276 dev_priv->mm.wedged = 1;
2277 DRM_WAKEUP(&dev_priv->irq_queue);
2278 break;
2279 }
2280 }
2281 msleep(10);
2282 last_seqno = cur_seqno;
2283 }
2284 dev_priv->mm.waiting_gem_seqno = 0;
2285
2286 i915_gem_retire_requests(dev);
2287
2288 /* Active and flushing should now be empty as we've
2289 * waited for a sequence higher than any pending execbuffer
2290 */
2291 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2292 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2293
2294 /* Request should now be empty as we've also waited
2295 * for the last request in the list
2296 */
2297 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2298
2299 /* Move all buffers out of the GTT. */
2300 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2301 if (ret) {
2302 mutex_unlock(&dev->struct_mutex);
2303 return ret;
2304 }
2305
2306 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2307 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2308 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2309 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2310
2311 i915_gem_cleanup_ringbuffer(dev);
2312 mutex_unlock(&dev->struct_mutex);
2313
2314 return 0;
2315}
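/* Editor's note: rough arithmetic for the stuck-detection loop above:
 * the hardware seqno is polled every 10 ms, and the chip is declared
 * wedged after ~100 consecutive identical reads, i.e. after roughly one
 * second without forward progress.
 */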
2316
2317static int
2318i915_gem_init_hws(struct drm_device *dev)
2319{
2320 drm_i915_private_t *dev_priv = dev->dev_private;
2321 struct drm_gem_object *obj;
2322 struct drm_i915_gem_object *obj_priv;
2323 int ret;
2324
2325 /* If we need a physical address for the status page, it's already
2326 * initialized at driver load time.
2327 */
2328 if (!I915_NEED_GFX_HWS(dev))
2329 return 0;
2330
2331 obj = drm_gem_object_alloc(dev, 4096);
2332 if (obj == NULL) {
2333 DRM_ERROR("Failed to allocate status page\n");
2334 return -ENOMEM;
2335 }
2336 obj_priv = obj->driver_private;
2337 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
2338
2339 ret = i915_gem_object_pin(obj, 4096);
2340 if (ret != 0) {
2341 drm_gem_object_unreference(obj);
2342 return ret;
2343 }
2344
2345 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2346
2347 dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
2348 if (dev_priv->hw_status_page == NULL) {
2349 DRM_ERROR("Failed to map status page.\n");
2350 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2351 drm_gem_object_unreference(obj);
2352 return -EINVAL;
2353 }
2354 dev_priv->hws_obj = obj;
2355 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2356 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2357 I915_READ(HWS_PGA); /* posting read */
2358 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2359
2360 return 0;
2361}
2362
2363static int
2364i915_gem_init_ringbuffer(struct drm_device *dev)
2365{
2366 drm_i915_private_t *dev_priv = dev->dev_private;
2367 struct drm_gem_object *obj;
2368 struct drm_i915_gem_object *obj_priv;
2369 int ret;
2370 u32 head;
2371
2372 ret = i915_gem_init_hws(dev);
2373 if (ret != 0)
2374 return ret;
2375
2376 obj = drm_gem_object_alloc(dev, 128 * 1024);
2377 if (obj == NULL) {
2378 DRM_ERROR("Failed to allocate ringbuffer\n");
2379 return -ENOMEM;
2380 }
2381 obj_priv = obj->driver_private;
2382
2383 ret = i915_gem_object_pin(obj, 4096);
2384 if (ret != 0) {
2385 drm_gem_object_unreference(obj);
2386 return ret;
2387 }
2388
2389 /* Set up the kernel mapping for the ring. */
2390 dev_priv->ring.Size = obj->size;
2391 dev_priv->ring.tail_mask = obj->size - 1;
2392
2393 dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2394 dev_priv->ring.map.size = obj->size;
2395 dev_priv->ring.map.type = 0;
2396 dev_priv->ring.map.flags = 0;
2397 dev_priv->ring.map.mtrr = 0;
2398
2399 drm_core_ioremap_wc(&dev_priv->ring.map, dev);
2400 if (dev_priv->ring.map.handle == NULL) {
2401 DRM_ERROR("Failed to map ringbuffer.\n");
2402 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2403 drm_gem_object_unreference(obj);
2404 return -EINVAL;
2405 }
2406 dev_priv->ring.ring_obj = obj;
2407 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2408
2409 /* Stop the ring if it's running. */
2410 I915_WRITE(PRB0_CTL, 0);
2411 I915_WRITE(PRB0_TAIL, 0);
2412 I915_WRITE(PRB0_HEAD, 0);
2413
2414 /* Initialize the ring. */
2415 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
2416 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2417
2418 /* G45 ring initialization fails to reset head to zero */
2419 if (head != 0) {
2420 DRM_ERROR("Ring head not reset to zero "
2421 "ctl %08x head %08x tail %08x start %08x\n",
2422 I915_READ(PRB0_CTL),
2423 I915_READ(PRB0_HEAD),
2424 I915_READ(PRB0_TAIL),
2425 I915_READ(PRB0_START));
2426 I915_WRITE(PRB0_HEAD, 0);
2427
2428 DRM_ERROR("Ring head forced to zero "
2429 "ctl %08x head %08x tail %08x start %08x\n",
2430 I915_READ(PRB0_CTL),
2431 I915_READ(PRB0_HEAD),
2432 I915_READ(PRB0_TAIL),
2433 I915_READ(PRB0_START));
2434 }
2435
2436 I915_WRITE(PRB0_CTL,
2437 ((obj->size - 4096) & RING_NR_PAGES) |
2438 RING_NO_REPORT |
2439 RING_VALID);
2440
2441 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
2442
2443 /* If the head is still not zero, the ring is dead */
2444 if (head != 0) {
2445 DRM_ERROR("Ring initialization failed "
2446 "ctl %08x head %08x tail %08x start %08x\n",
2447 I915_READ(PRB0_CTL),
2448 I915_READ(PRB0_HEAD),
2449 I915_READ(PRB0_TAIL),
2450 I915_READ(PRB0_START));
2451 return -EIO;
2452 }
2453
2454 /* Update our cache of the ring state */
2455 i915_kernel_lost_context(dev);
2456
2457 return 0;
2458}
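/* Editor's note: illustrative register math for the ring setup above.
 * The object is 128 KiB, so tail_mask == 0x1ffff, and the value masked
 * by RING_NR_PAGES is obj->size - 4096 == 0x20000 - 0x1000 == 0x1f000;
 * assuming that field encodes the ring length, it describes a 32-page
 * (128 KiB) ring.
 */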
2459
2460static void
2461i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2462{
2463 drm_i915_private_t *dev_priv = dev->dev_private;
2464
2465 if (dev_priv->ring.ring_obj == NULL)
2466 return;
2467
2468 drm_core_ioremapfree(&dev_priv->ring.map, dev);
2469
2470 i915_gem_object_unpin(dev_priv->ring.ring_obj);
2471 drm_gem_object_unreference(dev_priv->ring.ring_obj);
2472 dev_priv->ring.ring_obj = NULL;
2473 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2474
2475 if (dev_priv->hws_obj != NULL) {
2476 struct drm_gem_object *obj = dev_priv->hws_obj;
2477 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2478
2479 kunmap(obj_priv->page_list[0]);
2480 i915_gem_object_unpin(obj);
2481 drm_gem_object_unreference(obj);
2482 dev_priv->hws_obj = NULL;
2483 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2484 dev_priv->hw_status_page = NULL;
2485
2486 /* Write high address into HWS_PGA when disabling. */
2487 I915_WRITE(HWS_PGA, 0x1ffff000);
2488 }
2489}
2490
2491int
2492i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2493 struct drm_file *file_priv)
2494{
2495 drm_i915_private_t *dev_priv = dev->dev_private;
2496 int ret;
2497
2498 if (dev_priv->mm.wedged) {
2499 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2500 dev_priv->mm.wedged = 0;
2501 }
2502
2503 ret = i915_gem_init_ringbuffer(dev);
2504 if (ret != 0)
2505 return ret;
2506
2507 mutex_lock(&dev->struct_mutex);
2508 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2509 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2510 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2511 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2512 dev_priv->mm.suspended = 0;
2513 mutex_unlock(&dev->struct_mutex);
2514
2515 drm_irq_install(dev);
2516
2517 return 0;
2518}
2519
2520int
2521i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2522 struct drm_file *file_priv)
2523{
2524 int ret;
2525
2526 ret = i915_gem_idle(dev);
2527 drm_irq_uninstall(dev);
2528
2529 return ret;
2530}
2531
2532void
2533i915_gem_lastclose(struct drm_device *dev)
2534{
2535 int ret;
2536
2537 ret = i915_gem_idle(dev);
2538 if (ret)
2539 DRM_ERROR("failed to idle hardware: %d\n", ret);
2540}
2541
2542void
2543i915_gem_load(struct drm_device *dev)
2544{
2545 drm_i915_private_t *dev_priv = dev->dev_private;
2546
2547 INIT_LIST_HEAD(&dev_priv->mm.active_list);
2548 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2549 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2550 INIT_LIST_HEAD(&dev_priv->mm.request_list);
2551 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2552 i915_gem_retire_work_handler);
2553 INIT_WORK(&dev_priv->mm.vblank_work,
2554 i915_gem_vblank_work_handler);
2555 dev_priv->mm.next_gem_seqno = 1;
2556
2557 i915_gem_detect_bit_6_swizzle(dev);
2558}
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
new file mode 100644
index 000000000000..131c088f8c8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -0,0 +1,201 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32
33#if WATCH_INACTIVE
34void
35i915_verify_inactive(struct drm_device *dev, char *file, int line)
36{
37 drm_i915_private_t *dev_priv = dev->dev_private;
38 struct drm_gem_object *obj;
39 struct drm_i915_gem_object *obj_priv;
40
41 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
42 obj = obj_priv->obj;
43 if (obj_priv->pin_count || obj_priv->active ||
44 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
45 I915_GEM_DOMAIN_GTT)))
46 DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
47 obj,
48 obj_priv->pin_count, obj_priv->active,
49 obj->write_domain, file, line);
50 }
51}
52#endif /* WATCH_INACTIVE */
53
54
 55#if WATCH_BUF || WATCH_EXEC || WATCH_PWRITE
56static void
57i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
58 uint32_t bias, uint32_t mark)
59{
60 uint32_t *mem = kmap_atomic(page, KM_USER0);
61 int i;
62 for (i = start; i < end; i += 4)
63 DRM_INFO("%08x: %08x%s\n",
64 (int) (bias + i), mem[i / 4],
65 (bias + i == mark) ? " ********" : "");
66 kunmap_atomic(mem, KM_USER0);
67 /* give syslog time to catch up */
68 msleep(1);
69}
70
71void
72i915_gem_dump_object(struct drm_gem_object *obj, int len,
73 const char *where, uint32_t mark)
74{
75 struct drm_i915_gem_object *obj_priv = obj->driver_private;
76 int page;
77
78 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
79 for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
80 int page_len, chunk, chunk_len;
81
82 page_len = len - page * PAGE_SIZE;
83 if (page_len > PAGE_SIZE)
84 page_len = PAGE_SIZE;
85
86 for (chunk = 0; chunk < page_len; chunk += 128) {
87 chunk_len = page_len - chunk;
88 if (chunk_len > 128)
89 chunk_len = 128;
90 i915_gem_dump_page(obj_priv->page_list[page],
91 chunk, chunk + chunk_len,
92 obj_priv->gtt_offset +
93 page * PAGE_SIZE,
94 mark);
95 }
96 }
97}
98#endif
99
100#if WATCH_LRU
101void
102i915_dump_lru(struct drm_device *dev, const char *where)
103{
104 drm_i915_private_t *dev_priv = dev->dev_private;
105 struct drm_i915_gem_object *obj_priv;
106
107 DRM_INFO("active list %s {\n", where);
108 list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
109 list)
110 {
111 DRM_INFO(" %p: %08x\n", obj_priv,
112 obj_priv->last_rendering_seqno);
113 }
114 DRM_INFO("}\n");
115 DRM_INFO("flushing list %s {\n", where);
116 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
117 list)
118 {
119 DRM_INFO(" %p: %08x\n", obj_priv,
120 obj_priv->last_rendering_seqno);
121 }
122 DRM_INFO("}\n");
123 DRM_INFO("inactive %s {\n", where);
124 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
125 DRM_INFO(" %p: %08x\n", obj_priv,
126 obj_priv->last_rendering_seqno);
127 }
128 DRM_INFO("}\n");
129}
130#endif
131
132
133#if WATCH_COHERENCY
134void
135i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
136{
137 struct drm_device *dev = obj->dev;
138 struct drm_i915_gem_object *obj_priv = obj->driver_private;
139 int page;
140 uint32_t *gtt_mapping;
141 uint32_t *backing_map = NULL;
142 int bad_count = 0;
143
144 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
145 __func__, obj, obj_priv->gtt_offset, handle,
146 obj->size / 1024);
147
148 gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
149 obj->size);
150 if (gtt_mapping == NULL) {
151 DRM_ERROR("failed to map GTT space\n");
152 return;
153 }
154
155 for (page = 0; page < obj->size / PAGE_SIZE; page++) {
156 int i;
157
158 backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
159
160 if (backing_map == NULL) {
161 DRM_ERROR("failed to map backing page\n");
162 goto out;
163 }
164
165 for (i = 0; i < PAGE_SIZE / 4; i++) {
166 uint32_t cpuval = backing_map[i];
167 uint32_t gttval = readl(gtt_mapping +
168 page * 1024 + i);
169
170 if (cpuval != gttval) {
171 DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
172 "0x%08x vs 0x%08x\n",
173 (int)(obj_priv->gtt_offset +
174 page * PAGE_SIZE + i * 4),
175 cpuval, gttval);
176 if (bad_count++ >= 8) {
177 DRM_INFO("...\n");
178 goto out;
179 }
180 }
181 }
182 kunmap_atomic(backing_map, KM_USER0);
183 backing_map = NULL;
184 }
185
186 out:
187 if (backing_map != NULL)
188 kunmap_atomic(backing_map, KM_USER0);
189 iounmap(gtt_mapping);
190
191 /* give syslog time to catch up */
192 msleep(1);
193
194 /* Directly flush the object, since we just loaded values with the CPU
195 * from the backing pages and we don't want to disturb the cache
196 * management that we're trying to observe.
197 */
198
199 i915_gem_clflush_object(obj);
200}
201#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
new file mode 100644
index 000000000000..15d4160415b0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -0,0 +1,292 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34static int i915_gem_active_info(char *buf, char **start, off_t offset,
35 int request, int *eof, void *data)
36{
37 struct drm_minor *minor = (struct drm_minor *) data;
38 struct drm_device *dev = minor->dev;
39 drm_i915_private_t *dev_priv = dev->dev_private;
40 struct drm_i915_gem_object *obj_priv;
41 int len = 0;
42
43 if (offset > DRM_PROC_LIMIT) {
44 *eof = 1;
45 return 0;
46 }
47
48 *start = &buf[offset];
49 *eof = 0;
50 DRM_PROC_PRINT("Active:\n");
51 list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
52 list)
53 {
54 struct drm_gem_object *obj = obj_priv->obj;
55 if (obj->name) {
56 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
57 obj, obj->name,
58 obj->read_domains, obj->write_domain,
59 obj_priv->last_rendering_seqno);
60 } else {
61 DRM_PROC_PRINT(" %p: %08x %08x %d\n",
62 obj,
63 obj->read_domains, obj->write_domain,
64 obj_priv->last_rendering_seqno);
65 }
66 }
67 if (len > request + offset)
68 return request;
69 *eof = 1;
70 return len - offset;
71}
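All of the handlers in this file follow the same legacy create_proc_entry read_proc contract: format the whole report into buf, then use *start, request, and *eof to serve the window the caller asked for. A minimal sketch of that contract, for orientation only (example_read_proc and its output strings are hypothetical, not part of the patch):

static int example_read_proc(char *buf, char **start, off_t offset,
			     int request, int *eof, void *data)
{
	int len = 0;

	/* Format the full report on every invocation. */
	len += sprintf(buf + len, "first line\n");
	len += sprintf(buf + len, "second line\n");

	if (len <= offset) {		/* caller already consumed it all */
		*eof = 1;
		return 0;
	}
	*start = buf + offset;		/* unread tail begins here */
	if (len - offset > request)	/* more than this read can take */
		return request;
	*eof = 1;			/* this read drains the data */
	return len - offset;
}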
72
73static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
74 int request, int *eof, void *data)
75{
76 struct drm_minor *minor = (struct drm_minor *) data;
77 struct drm_device *dev = minor->dev;
78 drm_i915_private_t *dev_priv = dev->dev_private;
79 struct drm_i915_gem_object *obj_priv;
80 int len = 0;
81
82 if (offset > DRM_PROC_LIMIT) {
83 *eof = 1;
84 return 0;
85 }
86
87 *start = &buf[offset];
88 *eof = 0;
89 DRM_PROC_PRINT("Flushing:\n");
90 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
91 list)
92 {
93 struct drm_gem_object *obj = obj_priv->obj;
94 if (obj->name) {
95 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
96 obj, obj->name,
97 obj->read_domains, obj->write_domain,
98 obj_priv->last_rendering_seqno);
99 } else {
100 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
101 obj->read_domains, obj->write_domain,
102 obj_priv->last_rendering_seqno);
103 }
104 }
105 if (len > request + offset)
106 return request;
107 *eof = 1;
108 return len - offset;
109}
110
111static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
112 int request, int *eof, void *data)
113{
114 struct drm_minor *minor = (struct drm_minor *) data;
115 struct drm_device *dev = minor->dev;
116 drm_i915_private_t *dev_priv = dev->dev_private;
117 struct drm_i915_gem_object *obj_priv;
118 int len = 0;
119
120 if (offset > DRM_PROC_LIMIT) {
121 *eof = 1;
122 return 0;
123 }
124
125 *start = &buf[offset];
126 *eof = 0;
127 DRM_PROC_PRINT("Inactive:\n");
128 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
129 list)
130 {
131 struct drm_gem_object *obj = obj_priv->obj;
132 if (obj->name) {
133 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
134 obj, obj->name,
135 obj->read_domains, obj->write_domain,
136 obj_priv->last_rendering_seqno);
137 } else {
138 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
139 obj->read_domains, obj->write_domain,
140 obj_priv->last_rendering_seqno);
141 }
142 }
143 if (len > request + offset)
144 return request;
145 *eof = 1;
146 return len - offset;
147}
148
149static int i915_gem_request_info(char *buf, char **start, off_t offset,
150 int request, int *eof, void *data)
151{
152 struct drm_minor *minor = (struct drm_minor *) data;
153 struct drm_device *dev = minor->dev;
154 drm_i915_private_t *dev_priv = dev->dev_private;
155 struct drm_i915_gem_request *gem_request;
156 int len = 0;
157
158 if (offset > DRM_PROC_LIMIT) {
159 *eof = 1;
160 return 0;
161 }
162
163 *start = &buf[offset];
164 *eof = 0;
165 DRM_PROC_PRINT("Request:\n");
166 list_for_each_entry(gem_request, &dev_priv->mm.request_list,
167 list)
168 {
169 DRM_PROC_PRINT(" %d @ %d %08x\n",
170 gem_request->seqno,
171 (int) (jiffies - gem_request->emitted_jiffies),
172 gem_request->flush_domains);
173 }
174 if (len > request + offset)
175 return request;
176 *eof = 1;
177 return len - offset;
178}
179
180static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
181 int request, int *eof, void *data)
182{
183 struct drm_minor *minor = (struct drm_minor *) data;
184 struct drm_device *dev = minor->dev;
185 drm_i915_private_t *dev_priv = dev->dev_private;
186 int len = 0;
187
188 if (offset > DRM_PROC_LIMIT) {
189 *eof = 1;
190 return 0;
191 }
192
193 *start = &buf[offset];
194 *eof = 0;
195 DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
196 DRM_PROC_PRINT("Waiter sequence: %d\n",
197 dev_priv->mm.waiting_gem_seqno);
198 DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
199 if (len > request + offset)
200 return request;
201 *eof = 1;
202 return len - offset;
203}
204
205
206static int i915_interrupt_info(char *buf, char **start, off_t offset,
207 int request, int *eof, void *data)
208{
209 struct drm_minor *minor = (struct drm_minor *) data;
210 struct drm_device *dev = minor->dev;
211 drm_i915_private_t *dev_priv = dev->dev_private;
212 int len = 0;
213
214 if (offset > DRM_PROC_LIMIT) {
215 *eof = 1;
216 return 0;
217 }
218
219 *start = &buf[offset];
220 *eof = 0;
221 DRM_PROC_PRINT("Interrupt enable: %08x\n",
222 I915_READ(IER));
223 DRM_PROC_PRINT("Interrupt identity: %08x\n",
224 I915_READ(IIR));
225 DRM_PROC_PRINT("Interrupt mask: %08x\n",
226 I915_READ(IMR));
227 DRM_PROC_PRINT("Pipe A stat: %08x\n",
228 I915_READ(PIPEASTAT));
229 DRM_PROC_PRINT("Pipe B stat: %08x\n",
230 I915_READ(PIPEBSTAT));
231 DRM_PROC_PRINT("Interrupts received: %d\n",
232 atomic_read(&dev_priv->irq_received));
233 DRM_PROC_PRINT("Current sequence: %d\n",
234 i915_get_gem_seqno(dev));
235 DRM_PROC_PRINT("Waiter sequence: %d\n",
236 dev_priv->mm.waiting_gem_seqno);
237 DRM_PROC_PRINT("IRQ sequence: %d\n",
238 dev_priv->mm.irq_gem_seqno);
239 if (len > request + offset)
240 return request;
241 *eof = 1;
242 return len - offset;
243}
244
245static struct drm_proc_list {
246 /** file name */
247 const char *name;
 248 /** proc callback */
249 int (*f) (char *, char **, off_t, int, int *, void *);
250} i915_gem_proc_list[] = {
251 {"i915_gem_active", i915_gem_active_info},
252 {"i915_gem_flushing", i915_gem_flushing_info},
253 {"i915_gem_inactive", i915_gem_inactive_info},
254 {"i915_gem_request", i915_gem_request_info},
255 {"i915_gem_seqno", i915_gem_seqno_info},
256 {"i915_gem_interrupt", i915_interrupt_info},
257};
258
259#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
260
261int i915_gem_proc_init(struct drm_minor *minor)
262{
263 struct proc_dir_entry *ent;
264 int i, j;
265
266 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
267 ent = create_proc_entry(i915_gem_proc_list[i].name,
268 S_IFREG | S_IRUGO, minor->dev_root);
269 if (!ent) {
270 DRM_ERROR("Cannot create /proc/dri/.../%s\n",
271 i915_gem_proc_list[i].name);
272 for (j = 0; j < i; j++)
 273 remove_proc_entry(i915_gem_proc_list[j].name,
274 minor->dev_root);
275 return -1;
276 }
277 ent->read_proc = i915_gem_proc_list[i].f;
278 ent->data = minor;
279 }
280 return 0;
281}
282
283void i915_gem_proc_cleanup(struct drm_minor *minor)
284{
285 int i;
286
287 if (!minor->dev_root)
288 return;
289
290 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
291 remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
292}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
new file mode 100644
index 000000000000..e8b85ac4ca04
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32
33/** @file i915_gem_tiling.c
34 *
35 * Support for managing tiling state of buffer objects.
36 *
37 * The idea behind tiling is to increase cache hit rates by rearranging
38 * pixel data so that a group of pixel accesses are in the same cacheline.
 39 * Performance improvements from doing this on the back/depth buffers are on
 40 * the order of 30%.
41 *
42 * Intel architectures make this somewhat more complicated, though, by
43 * adjustments made to addressing of data when the memory is in interleaved
 44 * mode (matched pairs of DIMMs) to improve memory bandwidth.
45 * For interleaved memory, the CPU sends every sequential 64 bytes
46 * to an alternate memory channel so it can get the bandwidth from both.
47 *
48 * The GPU also rearranges its accesses for increased bandwidth to interleaved
49 * memory, and it matches what the CPU does for non-tiled. However, when tiled
50 * it does it a little differently, since one walks addresses not just in the
51 * X direction but also Y. So, along with alternating channels when bit
52 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
53 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
54 * are common to both the 915 and 965-class hardware.
55 *
 56 * The CPU sometimes XORs in higher bits as well, to improve
 57 * bandwidth when doing strided access like we do so frequently in graphics. This
58 * is called "Channel XOR Randomization" in the MCH documentation. The result
59 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
60 * decode.
61 *
62 * All of this bit 6 XORing has an effect on our memory management,
63 * as we need to make sure that the 3d driver can correctly address object
64 * contents.
65 *
66 * If we don't have interleaved memory, all tiling is safe and no swizzling is
67 * required.
68 *
69 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
 70 * 17 is not just a page offset, so as we page an object out and back in,
71 * individual pages in it will have different bit 17 addresses, resulting in
72 * each 64 bytes being swapped with its neighbor!
73 *
 74 * Otherwise, if interleaved, we have to tell the 3d driver what address
 75 * swizzling it needs to do, since it's writing with the CPU to the pages
 76 * (bit 6 and potentially bit 11 XORed in), while the GPU is reading from
 77 * the pages (bits 6, 9, and 10 XORed in). The cumulative swizzle the CPU
 78 * must apply is thus an XOR of bits 9, 10, and potentially 11 into bit 6,
 79 * in order to match what the GPU expects.
80 */
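To make the comment above concrete, here is a sketch of the CPU-side address transform for the 9/10 swizzle mode (swizzle_bit_6_9_10 is a hypothetical helper, not part of the patch; it illustrates I915_BIT_6_SWIZZLE_9_10 only):

/* Flip bit 6 of a linear offset by the XOR of bits 9 and 10, so that
 * data written by the CPU lands where the tiled GPU access will look.
 */
static unsigned long swizzle_bit_6_9_10(unsigned long offset)
{
	unsigned long bit9 = (offset >> 9) & 1;
	unsigned long bit10 = (offset >> 10) & 1;

	return offset ^ ((bit9 ^ bit10) << 6);
}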
81
82/**
83 * Detects bit 6 swizzling of address lookup between IGD access and CPU
84 * access through main memory.
85 */
86void
87i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
88{
89 drm_i915_private_t *dev_priv = dev->dev_private;
90 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
91 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
92
93 if (!IS_I9XX(dev)) {
94 /* As far as we know, the 865 doesn't have these bit 6
95 * swizzling issues.
96 */
97 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
98 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
99 } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
100 IS_GM45(dev)) {
101 uint32_t dcc;
102
103 /* On 915-945 and GM965, channel interleave by the CPU is
104 * determined by DCC. The CPU will alternate based on bit 6
105 * in interleaved mode, and the GPU will then also alternate
106 * on bit 6, 9, and 10 for X, but the CPU may also optionally
107 * alternate based on bit 17 (XOR not disabled and XOR
108 * bit == 17).
109 */
110 dcc = I915_READ(DCC);
111 switch (dcc & DCC_ADDRESSING_MODE_MASK) {
112 case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
113 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
114 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
115 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
116 break;
117 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
118 if (IS_I915G(dev) || IS_I915GM(dev) ||
119 dcc & DCC_CHANNEL_XOR_DISABLE) {
120 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
121 swizzle_y = I915_BIT_6_SWIZZLE_9;
122 } else if (IS_I965GM(dev) || IS_GM45(dev)) {
123 /* GM965 only does bit 11-based channel
124 * randomization
125 */
126 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
127 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
128 } else {
129 /* Bit 17 or perhaps other swizzling */
130 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
131 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
132 }
133 break;
134 }
135 if (dcc == 0xffffffff) {
136 DRM_ERROR("Couldn't read from MCHBAR. "
137 "Disabling tiling.\n");
138 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
139 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
140 }
141 } else {
 142 /* The 965, G33, and newer have a very flexible memory
143 * configuration. It will enable dual-channel mode
144 * (interleaving) on as much memory as it can, and the GPU
145 * will additionally sometimes enable different bit 6
146 * swizzling for tiled objects from the CPU.
147 *
148 * Here's what I found on the G965:
149 * slot fill memory size swizzling
150 * 0A 0B 1A 1B 1-ch 2-ch
151 * 512 0 0 0 512 0 O
152 * 512 0 512 0 16 1008 X
153 * 512 0 0 512 16 1008 X
154 * 0 512 0 512 16 1008 X
155 * 1024 1024 1024 0 2048 1024 O
156 *
157 * We could probably detect this based on either the DRB
158 * matching, which was the case for the swizzling required in
159 * the table above, or from the 1-ch value being less than
160 * the minimum size of a rank.
161 */
162 if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
163 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
164 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
165 } else {
166 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
167 swizzle_y = I915_BIT_6_SWIZZLE_9;
168 }
169 }
170
171 dev_priv->mm.bit_6_swizzle_x = swizzle_x;
172 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
173}
174
175/**
176 * Sets the tiling mode of an object, returning the required swizzling of
177 * bit 6 of addresses in the object.
178 */
179int
180i915_gem_set_tiling(struct drm_device *dev, void *data,
181 struct drm_file *file_priv)
182{
183 struct drm_i915_gem_set_tiling *args = data;
184 drm_i915_private_t *dev_priv = dev->dev_private;
185 struct drm_gem_object *obj;
186 struct drm_i915_gem_object *obj_priv;
187
188 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
189 if (obj == NULL)
190 return -EINVAL;
191 obj_priv = obj->driver_private;
192
193 mutex_lock(&dev->struct_mutex);
194
195 if (args->tiling_mode == I915_TILING_NONE) {
196 obj_priv->tiling_mode = I915_TILING_NONE;
197 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
198 } else {
199 if (args->tiling_mode == I915_TILING_X)
200 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
201 else
202 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
203 /* If we can't handle the swizzling, make it untiled. */
204 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
205 args->tiling_mode = I915_TILING_NONE;
206 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
207 }
208 }
209 obj_priv->tiling_mode = args->tiling_mode;
210
211 mutex_unlock(&dev->struct_mutex);
212
213 drm_gem_object_unreference(obj);
214
215 return 0;
216}
217
218/**
219 * Returns the current tiling mode and required bit 6 swizzling for the object.
220 */
221int
222i915_gem_get_tiling(struct drm_device *dev, void *data,
223 struct drm_file *file_priv)
224{
225 struct drm_i915_gem_get_tiling *args = data;
226 drm_i915_private_t *dev_priv = dev->dev_private;
227 struct drm_gem_object *obj;
228 struct drm_i915_gem_object *obj_priv;
229
230 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
231 if (obj == NULL)
232 return -EINVAL;
233 obj_priv = obj->driver_private;
234
235 mutex_lock(&dev->struct_mutex);
236
237 args->tiling_mode = obj_priv->tiling_mode;
238 switch (obj_priv->tiling_mode) {
239 case I915_TILING_X:
240 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
241 break;
242 case I915_TILING_Y:
243 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
244 break;
245 case I915_TILING_NONE:
246 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
247 break;
248 default:
249 DRM_ERROR("unknown tiling mode\n");
250 }
251
252 mutex_unlock(&dev->struct_mutex);
253
254 drm_gem_object_unreference(obj);
255
256 return 0;
257}
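For reference, a hedged sketch of how userspace would exercise these two entry points; it assumes the DRM_IOCTL_I915_GEM_SET_TILING macro from the i915_drm.h side of this patch, and set_x_tiled is a hypothetical helper:

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int set_x_tiled(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_tiling arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.tiling_mode = I915_TILING_X;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg) != 0)
		return -1;

	/* The kernel may have demoted the request to I915_TILING_NONE if
	 * the swizzle mode was I915_BIT_6_SWIZZLE_UNKNOWN; either way,
	 * arg.swizzle_mode now says what CPU access must apply.
	 */
	return arg.tiling_mode == I915_TILING_X ? 0 : 1;
}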
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index df036118b8b1..baae511c785b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,12 +31,92 @@
31#include "i915_drm.h" 31#include "i915_drm.h"
32#include "i915_drv.h" 32#include "i915_drv.h"
33 33
34#define USER_INT_FLAG (1<<1)
35#define VSYNC_PIPEB_FLAG (1<<5)
36#define VSYNC_PIPEA_FLAG (1<<7)
37
38#define MAX_NOPID ((u32)~0) 34#define MAX_NOPID ((u32)~0)
39 35
36/** These are the interrupts used by the driver */
37#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
38 I915_ASLE_INTERRUPT | \
39 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
40 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
41
42void
43i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
44{
45 if ((dev_priv->irq_mask_reg & mask) != 0) {
46 dev_priv->irq_mask_reg &= ~mask;
47 I915_WRITE(IMR, dev_priv->irq_mask_reg);
48 (void) I915_READ(IMR);
49 }
50}
51
52static inline void
53i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
54{
55 if ((dev_priv->irq_mask_reg & mask) != mask) {
56 dev_priv->irq_mask_reg |= mask;
57 I915_WRITE(IMR, dev_priv->irq_mask_reg);
58 (void) I915_READ(IMR);
59 }
60}
61
62/**
 63 * i915_get_pipe - return the pipe associated with a given plane
64 * @dev: DRM device
65 * @plane: plane to look for
66 *
67 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
68 * rather than a pipe number, since they may not always be equal. This routine
69 * maps the given @plane back to a pipe number.
70 */
71static int
72i915_get_pipe(struct drm_device *dev, int plane)
73{
74 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
75 u32 dspcntr;
76
77 dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
78
79 return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
80}
81
82/**
 83 * i915_get_plane - return the plane associated with a given pipe
84 * @dev: DRM device
85 * @pipe: pipe to look for
86 *
87 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
 88 * rather than a pipe number, since they may not always be equal. This routine
89 * maps the given @pipe back to a plane number.
90 */
91static int
92i915_get_plane(struct drm_device *dev, int pipe)
93{
94 if (i915_get_pipe(dev, 0) == pipe)
95 return 0;
96 return 1;
97}
98
99/**
100 * i915_pipe_enabled - check if a pipe is enabled
101 * @dev: DRM device
102 * @pipe: pipe to check
103 *
104 * Reading certain registers when the pipe is disabled can hang the chip.
105 * Use this routine to make sure the PLL is running and the pipe is active
106 * before reading such registers if unsure.
107 */
108static int
109i915_pipe_enabled(struct drm_device *dev, int pipe)
110{
111 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
112 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
113
114 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
115 return 1;
116
117 return 0;
118}
119
40/** 120/**
41 * Emit blits for scheduled buffer swaps. 121 * Emit blits for scheduled buffer swaps.
42 * 122 *
@@ -48,8 +128,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
48 unsigned long irqflags; 128 unsigned long irqflags;
49 struct list_head *list, *tmp, hits, *hit; 129 struct list_head *list, *tmp, hits, *hit;
50 int nhits, nrects, slice[2], upper[2], lower[2], i; 130 int nhits, nrects, slice[2], upper[2], lower[2], i;
51 unsigned counter[2] = { atomic_read(&dev->vbl_received), 131 unsigned counter[2];
52 atomic_read(&dev->vbl_received2) };
53 struct drm_drawable_info *drw; 132 struct drm_drawable_info *drw;
54 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; 133 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
55 u32 cpp = dev_priv->cpp; 134 u32 cpp = dev_priv->cpp;
@@ -71,6 +150,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
71 src_pitch >>= 2; 150 src_pitch >>= 2;
72 } 151 }
73 152
153 counter[0] = drm_vblank_count(dev, 0);
154 counter[1] = drm_vblank_count(dev, 1);
155
74 DRM_DEBUG("\n"); 156 DRM_DEBUG("\n");
75 157
76 INIT_LIST_HEAD(&hits); 158 INIT_LIST_HEAD(&hits);
@@ -83,12 +165,14 @@ static void i915_vblank_tasklet(struct drm_device *dev)
83 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { 165 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
84 drm_i915_vbl_swap_t *vbl_swap = 166 drm_i915_vbl_swap_t *vbl_swap =
85 list_entry(list, drm_i915_vbl_swap_t, head); 167 list_entry(list, drm_i915_vbl_swap_t, head);
168 int pipe = i915_get_pipe(dev, vbl_swap->plane);
86 169
87 if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23)) 170 if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
88 continue; 171 continue;
89 172
90 list_del(list); 173 list_del(list);
91 dev_priv->swaps_pending--; 174 dev_priv->swaps_pending--;
175 drm_vblank_put(dev, pipe);
92 176
93 spin_unlock(&dev_priv->swaps_lock); 177 spin_unlock(&dev_priv->swaps_lock);
94 spin_lock(&dev->drw_lock); 178 spin_lock(&dev->drw_lock);
@@ -181,7 +265,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
181 drm_i915_vbl_swap_t *swap_hit = 265 drm_i915_vbl_swap_t *swap_hit =
182 list_entry(hit, drm_i915_vbl_swap_t, head); 266 list_entry(hit, drm_i915_vbl_swap_t, head);
183 struct drm_clip_rect *rect; 267 struct drm_clip_rect *rect;
184 int num_rects, pipe; 268 int num_rects, plane;
185 unsigned short top, bottom; 269 unsigned short top, bottom;
186 270
187 drw = drm_get_drawable_info(dev, swap_hit->drw_id); 271 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
@@ -190,9 +274,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
190 continue; 274 continue;
191 275
192 rect = drw->rects; 276 rect = drw->rects;
193 pipe = swap_hit->pipe; 277 plane = swap_hit->plane;
194 top = upper[pipe]; 278 top = upper[plane];
195 bottom = lower[pipe]; 279 bottom = lower[plane];
196 280
197 for (num_rects = drw->num_rects; num_rects--; rect++) { 281 for (num_rects = drw->num_rects; num_rects--; rect++) {
198 int y1 = max(rect->y1, top); 282 int y1 = max(rect->y1, top);
@@ -229,61 +313,139 @@ static void i915_vblank_tasklet(struct drm_device *dev)
229 } 313 }
230} 314}
231 315
316u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
317{
318 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
319 unsigned long high_frame;
320 unsigned long low_frame;
321 u32 high1, high2, low, count;
322 int pipe;
323
324 pipe = i915_get_pipe(dev, plane);
325 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
326 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
327
328 if (!i915_pipe_enabled(dev, pipe)) {
329 DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
330 return 0;
331 }
332
333 /*
334 * High & low register fields aren't synchronized, so make sure
335 * we get a low value that's stable across two reads of the high
336 * register.
337 */
338 do {
339 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
340 PIPE_FRAME_HIGH_SHIFT);
341 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
342 PIPE_FRAME_LOW_SHIFT);
343 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
344 PIPE_FRAME_HIGH_SHIFT);
345 } while (high1 != high2);
346
347 count = (high1 << 8) | low;
348
349 return count;
350}
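Since only 24 bits of frame count are available, the counter wraps, and consumers compare sequence numbers with wraparound-safe unsigned arithmetic. A sketch of the test the swap tasklet applies (vblank_seq_passed is a hypothetical helper name):

/* A sequence counts as already reached when it lies at most half the
 * 24-bit counter space (1 << 23) behind the current count; this is the
 * complement of the (counter - sequence) > (1<<23) "not yet" test in
 * i915_vblank_tasklet.
 */
static int vblank_seq_passed(u32 count, u32 seq)
{
	return (count - seq) <= (1u << 23);
}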
351
352void
353i915_gem_vblank_work_handler(struct work_struct *work)
354{
355 drm_i915_private_t *dev_priv;
356 struct drm_device *dev;
357
358 dev_priv = container_of(work, drm_i915_private_t,
359 mm.vblank_work);
360 dev = dev_priv->dev;
361
362 mutex_lock(&dev->struct_mutex);
363 i915_vblank_tasklet(dev);
364 mutex_unlock(&dev->struct_mutex);
365}
366
232irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 367irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
233{ 368{
234 struct drm_device *dev = (struct drm_device *) arg; 369 struct drm_device *dev = (struct drm_device *) arg;
235 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 370 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
236 u16 temp; 371 u32 iir;
237 u32 pipea_stats, pipeb_stats; 372 u32 pipea_stats, pipeb_stats;
373 int vblank = 0;
238 374
239 pipea_stats = I915_READ(I915REG_PIPEASTAT); 375 atomic_inc(&dev_priv->irq_received);
240 pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
241 376
242 temp = I915_READ16(I915REG_INT_IDENTITY_R); 377 if (dev->pdev->msi_enabled)
378 I915_WRITE(IMR, ~0);
379 iir = I915_READ(IIR);
243 380
244 temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG); 381 if (iir == 0) {
382 if (dev->pdev->msi_enabled) {
383 I915_WRITE(IMR, dev_priv->irq_mask_reg);
384 (void) I915_READ(IMR);
385 }
386 return IRQ_NONE;
387 }
245 388
246 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); 389 /*
390 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
391 * we may get extra interrupts.
392 */
393 if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
394 pipea_stats = I915_READ(PIPEASTAT);
395 if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
396 pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
397 PIPE_VBLANK_INTERRUPT_ENABLE);
398 else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
399 PIPE_VBLANK_INTERRUPT_STATUS)) {
400 vblank++;
401 drm_handle_vblank(dev, i915_get_plane(dev, 0));
402 }
247 403
248 if (temp == 0) 404 I915_WRITE(PIPEASTAT, pipea_stats);
249 return IRQ_NONE; 405 }
406 if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
407 pipeb_stats = I915_READ(PIPEBSTAT);
408 /* Ack the event */
409 I915_WRITE(PIPEBSTAT, pipeb_stats);
410
411 /* The vblank interrupt gets enabled even if we didn't ask for
412 it, so make sure it's shut down again */
413 if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
414 pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
415 PIPE_VBLANK_INTERRUPT_ENABLE);
416 else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
417 PIPE_VBLANK_INTERRUPT_STATUS)) {
418 vblank++;
419 drm_handle_vblank(dev, i915_get_plane(dev, 1));
420 }
250 421
251 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 422 if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
252 (void) I915_READ16(I915REG_INT_IDENTITY_R); 423 opregion_asle_intr(dev);
253 DRM_READMEMORYBARRIER(); 424 I915_WRITE(PIPEBSTAT, pipeb_stats);
425 }
426
427 I915_WRITE(IIR, iir);
428 if (dev->pdev->msi_enabled)
429 I915_WRITE(IMR, dev_priv->irq_mask_reg);
430 (void) I915_READ(IIR); /* Flush posted writes */
254 431
255 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 432 if (dev_priv->sarea_priv)
433 dev_priv->sarea_priv->last_dispatch =
434 READ_BREADCRUMB(dev_priv);
256 435
257 if (temp & USER_INT_FLAG) 436 if (iir & I915_USER_INTERRUPT) {
437 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
258 DRM_WAKEUP(&dev_priv->irq_queue); 438 DRM_WAKEUP(&dev_priv->irq_queue);
439 }
259 440
260 if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) { 441 if (iir & I915_ASLE_INTERRUPT)
261 int vblank_pipe = dev_priv->vblank_pipe; 442 opregion_asle_intr(dev);
262 443
263 if ((vblank_pipe & 444 if (vblank && dev_priv->swaps_pending > 0) {
264 (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) 445 if (dev_priv->ring.ring_obj == NULL)
265 == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
266 if (temp & VSYNC_PIPEA_FLAG)
267 atomic_inc(&dev->vbl_received);
268 if (temp & VSYNC_PIPEB_FLAG)
269 atomic_inc(&dev->vbl_received2);
270 } else if (((temp & VSYNC_PIPEA_FLAG) &&
271 (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
272 ((temp & VSYNC_PIPEB_FLAG) &&
273 (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
274 atomic_inc(&dev->vbl_received);
275
276 DRM_WAKEUP(&dev->vbl_queue);
277 drm_vbl_send_signals(dev);
278
279 if (dev_priv->swaps_pending > 0)
280 drm_locked_tasklet(dev, i915_vblank_tasklet); 446 drm_locked_tasklet(dev, i915_vblank_tasklet);
281 I915_WRITE(I915REG_PIPEASTAT, 447 else
282 pipea_stats|I915_VBLANK_INTERRUPT_ENABLE| 448 schedule_work(&dev_priv->mm.vblank_work);
283 I915_VBLANK_CLEAR);
284 I915_WRITE(I915REG_PIPEBSTAT,
285 pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
286 I915_VBLANK_CLEAR);
287 } 449 }
288 450
289 return IRQ_HANDLED; 451 return IRQ_HANDLED;
@@ -298,23 +460,45 @@ static int i915_emit_irq(struct drm_device * dev)
298 460
299 DRM_DEBUG("\n"); 461 DRM_DEBUG("\n");
300 462
301 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; 463 dev_priv->counter++;
302
303 if (dev_priv->counter > 0x7FFFFFFFUL) 464 if (dev_priv->counter > 0x7FFFFFFFUL)
304 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; 465 dev_priv->counter = 1;
466 if (dev_priv->sarea_priv)
467 dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
305 468
306 BEGIN_LP_RING(6); 469 BEGIN_LP_RING(6);
307 OUT_RING(CMD_STORE_DWORD_IDX); 470 OUT_RING(MI_STORE_DWORD_INDEX);
308 OUT_RING(20); 471 OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
309 OUT_RING(dev_priv->counter); 472 OUT_RING(dev_priv->counter);
310 OUT_RING(0); 473 OUT_RING(0);
311 OUT_RING(0); 474 OUT_RING(0);
312 OUT_RING(GFX_OP_USER_INTERRUPT); 475 OUT_RING(MI_USER_INTERRUPT);
313 ADVANCE_LP_RING(); 476 ADVANCE_LP_RING();
314 477
315 return dev_priv->counter; 478 return dev_priv->counter;
316} 479}
317 480
481void i915_user_irq_get(struct drm_device *dev)
482{
483 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
484
485 spin_lock(&dev_priv->user_irq_lock);
486 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
487 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
488 spin_unlock(&dev_priv->user_irq_lock);
489}
490
491void i915_user_irq_put(struct drm_device *dev)
492{
493 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
494
495 spin_lock(&dev_priv->user_irq_lock);
496 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
497 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
498 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
499 spin_unlock(&dev_priv->user_irq_lock);
500}
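These get/put helpers implement refcounted unmasking: the IMR write happens only on the 0->1 and 1->0 transitions, so nested users of the user interrupt stay cheap. Typical usage, as in i915_wait_irq below:

	i915_user_irq_get(dev);	/* 0->1: unmask I915_USER_INTERRUPT */
	/* ... sleep until the breadcrumb reaches the target seqno ... */
	i915_user_irq_put(dev);	/* 1->0: mask it again */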
501
318static int i915_wait_irq(struct drm_device * dev, int irq_nr) 502static int i915_wait_irq(struct drm_device * dev, int irq_nr)
319{ 503{
320 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 504 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -323,55 +507,34 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
323 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, 507 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
324 READ_BREADCRUMB(dev_priv)); 508 READ_BREADCRUMB(dev_priv));
325 509
326 if (READ_BREADCRUMB(dev_priv) >= irq_nr) 510 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
511 if (dev_priv->sarea_priv) {
512 dev_priv->sarea_priv->last_dispatch =
513 READ_BREADCRUMB(dev_priv);
514 }
327 return 0; 515 return 0;
516 }
328 517
329 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 518 if (dev_priv->sarea_priv)
519 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
330 520
521 i915_user_irq_get(dev);
331 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, 522 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
332 READ_BREADCRUMB(dev_priv) >= irq_nr); 523 READ_BREADCRUMB(dev_priv) >= irq_nr);
524 i915_user_irq_put(dev);
333 525
334 if (ret == -EBUSY) { 526 if (ret == -EBUSY) {
335 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 527 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
336 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 528 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
337 } 529 }
338 530
339 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 531 if (dev_priv->sarea_priv)
340 return ret; 532 dev_priv->sarea_priv->last_dispatch =
341} 533 READ_BREADCRUMB(dev_priv);
342
343static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
344 atomic_t *counter)
345{
346 drm_i915_private_t *dev_priv = dev->dev_private;
347 unsigned int cur_vblank;
348 int ret = 0;
349
350 if (!dev_priv) {
351 DRM_ERROR("called with no initialization\n");
352 return -EINVAL;
353 }
354
355 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
356 (((cur_vblank = atomic_read(counter))
357 - *sequence) <= (1<<23)));
358
359 *sequence = cur_vblank;
360 534
361 return ret; 535 return ret;
362} 536}
363 537
364
365int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
366{
367 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
368}
369
370int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
371{
372 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
373}
374
375/* Needs the lock as it touches the ring. 538/* Needs the lock as it touches the ring.
376 */ 539 */
377int i915_irq_emit(struct drm_device *dev, void *data, 540int i915_irq_emit(struct drm_device *dev, void *data,
@@ -381,14 +544,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
381 drm_i915_irq_emit_t *emit = data; 544 drm_i915_irq_emit_t *emit = data;
382 int result; 545 int result;
383 546
384 LOCK_TEST_WITH_RETURN(dev, file_priv); 547 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
385 548
386 if (!dev_priv) { 549 if (!dev_priv) {
387 DRM_ERROR("called with no initialization\n"); 550 DRM_ERROR("called with no initialization\n");
388 return -EINVAL; 551 return -EINVAL;
389 } 552 }
390 553 mutex_lock(&dev->struct_mutex);
391 result = i915_emit_irq(dev); 554 result = i915_emit_irq(dev);
555 mutex_unlock(&dev->struct_mutex);
392 556
393 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 557 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
394 DRM_ERROR("copy_to_user\n"); 558 DRM_ERROR("copy_to_user\n");
@@ -414,18 +578,74 @@ int i915_irq_wait(struct drm_device *dev, void *data,
414 return i915_wait_irq(dev, irqwait->irq_seq); 578 return i915_wait_irq(dev, irqwait->irq_seq);
415} 579}
416 580
417static void i915_enable_interrupt (struct drm_device *dev) 581int i915_enable_vblank(struct drm_device *dev, int plane)
418{ 582{
419 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 583 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
420 u16 flag; 584 int pipe = i915_get_pipe(dev, plane);
585 u32 pipestat_reg = 0;
586 u32 pipestat;
587
588 switch (pipe) {
589 case 0:
590 pipestat_reg = PIPEASTAT;
591 i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
592 break;
593 case 1:
594 pipestat_reg = PIPEBSTAT;
595 i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
596 break;
597 default:
598 DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
599 pipe);
600 break;
601 }
602
603 if (pipestat_reg) {
604 pipestat = I915_READ(pipestat_reg);
605 if (IS_I965G(dev))
606 pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
607 else
608 pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
609 /* Clear any stale interrupt status */
610 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
611 PIPE_VBLANK_INTERRUPT_STATUS);
612 I915_WRITE(pipestat_reg, pipestat);
613 }
614
615 return 0;
616}
421 617
422 flag = 0; 618void i915_disable_vblank(struct drm_device *dev, int plane)
423 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A) 619{
424 flag |= VSYNC_PIPEA_FLAG; 620 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
425 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B) 621 int pipe = i915_get_pipe(dev, plane);
426 flag |= VSYNC_PIPEB_FLAG; 622 u32 pipestat_reg = 0;
623 u32 pipestat;
624
625 switch (pipe) {
626 case 0:
627 pipestat_reg = PIPEASTAT;
628 i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
629 break;
630 case 1:
631 pipestat_reg = PIPEBSTAT;
632 i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
633 break;
634 default:
635 DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
636 pipe);
637 break;
638 }
427 639
428 I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag); 640 if (pipestat_reg) {
641 pipestat = I915_READ(pipestat_reg);
642 pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
643 PIPE_VBLANK_INTERRUPT_ENABLE);
644 /* Clear any stale interrupt status */
645 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
646 PIPE_VBLANK_INTERRUPT_STATUS);
647 I915_WRITE(pipestat_reg, pipestat);
648 }
429} 649}
430 650
431/* Set the vblank monitor pipe 651/* Set the vblank monitor pipe
@@ -434,22 +654,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
434 struct drm_file *file_priv) 654 struct drm_file *file_priv)
435{ 655{
436 drm_i915_private_t *dev_priv = dev->dev_private; 656 drm_i915_private_t *dev_priv = dev->dev_private;
437 drm_i915_vblank_pipe_t *pipe = data;
438 657
439 if (!dev_priv) { 658 if (!dev_priv) {
440 DRM_ERROR("called with no initialization\n"); 659 DRM_ERROR("called with no initialization\n");
441 return -EINVAL; 660 return -EINVAL;
442 } 661 }
443 662
444 if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
445 DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
446 return -EINVAL;
447 }
448
449 dev_priv->vblank_pipe = pipe->pipe;
450
451 i915_enable_interrupt (dev);
452
453 return 0; 663 return 0;
454} 664}
455 665
@@ -458,19 +668,13 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
458{ 668{
459 drm_i915_private_t *dev_priv = dev->dev_private; 669 drm_i915_private_t *dev_priv = dev->dev_private;
460 drm_i915_vblank_pipe_t *pipe = data; 670 drm_i915_vblank_pipe_t *pipe = data;
461 u16 flag;
462 671
463 if (!dev_priv) { 672 if (!dev_priv) {
464 DRM_ERROR("called with no initialization\n"); 673 DRM_ERROR("called with no initialization\n");
465 return -EINVAL; 674 return -EINVAL;
466 } 675 }
467 676
468 flag = I915_READ(I915REG_INT_ENABLE_R); 677 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
469 pipe->pipe = 0;
470 if (flag & VSYNC_PIPEA_FLAG)
471 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
472 if (flag & VSYNC_PIPEB_FLAG)
473 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
474 678
475 return 0; 679 return 0;
476} 680}
@@ -484,11 +688,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
484 drm_i915_private_t *dev_priv = dev->dev_private; 688 drm_i915_private_t *dev_priv = dev->dev_private;
485 drm_i915_vblank_swap_t *swap = data; 689 drm_i915_vblank_swap_t *swap = data;
486 drm_i915_vbl_swap_t *vbl_swap; 690 drm_i915_vbl_swap_t *vbl_swap;
487 unsigned int pipe, seqtype, curseq; 691 unsigned int pipe, seqtype, curseq, plane;
488 unsigned long irqflags; 692 unsigned long irqflags;
489 struct list_head *list; 693 struct list_head *list;
694 int ret;
490 695
491 if (!dev_priv) { 696 if (!dev_priv || !dev_priv->sarea_priv) {
492 DRM_ERROR("%s called with no initialization\n", __func__); 697 DRM_ERROR("%s called with no initialization\n", __func__);
493 return -EINVAL; 698 return -EINVAL;
494 } 699 }
@@ -504,7 +709,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
504 return -EINVAL; 709 return -EINVAL;
505 } 710 }
506 711
507 pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 712 plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
713 pipe = i915_get_pipe(dev, plane);
508 714
509 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 715 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
510 716
@@ -523,7 +729,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
523 729
524 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 730 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
525 731
526 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); 732 /*
733 * We take the ref here and put it when the swap actually completes
734 * in the tasklet.
735 */
736 ret = drm_vblank_get(dev, pipe);
737 if (ret)
738 return ret;
739 curseq = drm_vblank_count(dev, pipe);
527 740
528 if (seqtype == _DRM_VBLANK_RELATIVE) 741 if (seqtype == _DRM_VBLANK_RELATIVE)
529 swap->sequence += curseq; 742 swap->sequence += curseq;
@@ -533,6 +746,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
533 swap->sequence = curseq + 1; 746 swap->sequence = curseq + 1;
534 } else { 747 } else {
535 DRM_DEBUG("Missed target sequence\n"); 748 DRM_DEBUG("Missed target sequence\n");
749 drm_vblank_put(dev, pipe);
536 return -EINVAL; 750 return -EINVAL;
537 } 751 }
538 } 752 }
@@ -543,7 +757,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
543 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); 757 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
544 758
545 if (vbl_swap->drw_id == swap->drawable && 759 if (vbl_swap->drw_id == swap->drawable &&
546 vbl_swap->pipe == pipe && 760 vbl_swap->plane == plane &&
547 vbl_swap->sequence == swap->sequence) { 761 vbl_swap->sequence == swap->sequence) {
548 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 762 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
549 DRM_DEBUG("Already scheduled\n"); 763 DRM_DEBUG("Already scheduled\n");
@@ -555,6 +769,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
555 769
556 if (dev_priv->swaps_pending >= 100) { 770 if (dev_priv->swaps_pending >= 100) {
557 DRM_DEBUG("Too many swaps queued\n"); 771 DRM_DEBUG("Too many swaps queued\n");
772 drm_vblank_put(dev, pipe);
558 return -EBUSY; 773 return -EBUSY;
559 } 774 }
560 775
@@ -562,13 +777,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
562 777
563 if (!vbl_swap) { 778 if (!vbl_swap) {
564 DRM_ERROR("Failed to allocate memory to queue swap\n"); 779 DRM_ERROR("Failed to allocate memory to queue swap\n");
780 drm_vblank_put(dev, pipe);
565 return -ENOMEM; 781 return -ENOMEM;
566 } 782 }
567 783
568 DRM_DEBUG("\n"); 784 DRM_DEBUG("\n");
569 785
570 vbl_swap->drw_id = swap->drawable; 786 vbl_swap->drw_id = swap->drawable;
571 vbl_swap->pipe = pipe; 787 vbl_swap->plane = plane;
572 vbl_swap->sequence = swap->sequence; 788 vbl_swap->sequence = swap->sequence;
573 789
574 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 790 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
@@ -587,37 +803,63 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
587{ 803{
588 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 804 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
589 805
590 I915_WRITE16(I915REG_HWSTAM, 0xfffe); 806 I915_WRITE(HWSTAM, 0xeffe);
591 I915_WRITE16(I915REG_INT_MASK_R, 0x0); 807 I915_WRITE(IMR, 0xffffffff);
592 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 808 I915_WRITE(IER, 0x0);
593} 809}
594 810
595void i915_driver_irq_postinstall(struct drm_device * dev) 811int i915_driver_irq_postinstall(struct drm_device *dev)
596{ 812{
597 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 813 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
814 int ret, num_pipes = 2;
598 815
599 spin_lock_init(&dev_priv->swaps_lock); 816 spin_lock_init(&dev_priv->swaps_lock);
600 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); 817 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
601 dev_priv->swaps_pending = 0; 818 dev_priv->swaps_pending = 0;
602 819
603 if (!dev_priv->vblank_pipe) 820 /* Set initial unmasked IRQs to just the selected vblank pipes. */
604 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; 821 dev_priv->irq_mask_reg = ~0;
605 i915_enable_interrupt(dev); 822
823 ret = drm_vblank_init(dev, num_pipes);
824 if (ret)
825 return ret;
826
827 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
828 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
829 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
830
831 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
832
833 dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
834
835 I915_WRITE(IMR, dev_priv->irq_mask_reg);
836 I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
837 (void) I915_READ(IER);
838
839 opregion_enable_asle(dev);
606 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 840 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
841
842 return 0;
607} 843}
608 844
609void i915_driver_irq_uninstall(struct drm_device * dev) 845void i915_driver_irq_uninstall(struct drm_device * dev)
610{ 846{
611 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 847 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
612 u16 temp; 848 u32 temp;
613 849
614 if (!dev_priv) 850 if (!dev_priv)
615 return; 851 return;
616 852
617 I915_WRITE16(I915REG_HWSTAM, 0xffff); 853 dev_priv->vblank_pipe = 0;
618 I915_WRITE16(I915REG_INT_MASK_R, 0xffff); 854
619 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 855 I915_WRITE(HWSTAM, 0xffffffff);
856 I915_WRITE(IMR, 0xffffffff);
857 I915_WRITE(IER, 0x0);
620 858
621 temp = I915_READ16(I915REG_INT_IDENTITY_R); 859 temp = I915_READ(PIPEASTAT);
622 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 860 I915_WRITE(PIPEASTAT, temp);
861 temp = I915_READ(PIPEBSTAT);
862 I915_WRITE(PIPEBSTAT, temp);
863 temp = I915_READ(IIR);
864 I915_WRITE(IIR, temp);
623} 865}
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
new file mode 100644
index 000000000000..1787a0c7e3ab
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -0,0 +1,371 @@
1/*
2 * Copyright 2008 Intel Corporation <hong.liu@intel.com>
3 * Copyright 2008 Red Hat <mjg@redhat.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
22 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
23 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 *
26 */
27
28#include <linux/acpi.h>
29
30#include "drmP.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34#define PCI_ASLE 0xe4
35#define PCI_LBPC 0xf4
36#define PCI_ASLS 0xfc
37
38#define OPREGION_SZ (8*1024)
39#define OPREGION_HEADER_OFFSET 0
40#define OPREGION_ACPI_OFFSET 0x100
41#define OPREGION_SWSCI_OFFSET 0x200
42#define OPREGION_ASLE_OFFSET 0x300
43#define OPREGION_VBT_OFFSET 0x1000
44
45#define OPREGION_SIGNATURE "IntelGraphicsMem"
46#define MBOX_ACPI (1<<0)
47#define MBOX_SWSCI (1<<1)
48#define MBOX_ASLE (1<<2)
49
50struct opregion_header {
51 u8 signature[16];
52 u32 size;
53 u32 opregion_ver;
54 u8 bios_ver[32];
55 u8 vbios_ver[16];
56 u8 driver_ver[16];
57 u32 mboxes;
58 u8 reserved[164];
59} __attribute__((packed));
60
61/* OpRegion mailbox #1: public ACPI methods */
62struct opregion_acpi {
63 u32 drdy; /* driver readiness */
64 u32 csts; /* notification status */
65 u32 cevt; /* current event */
66 u8 rsvd1[20];
67 u32 didl[8]; /* supported display devices ID list */
68 u32 cpdl[8]; /* currently presented display list */
69 u32 cadl[8]; /* currently active display list */
70 u32 nadl[8]; /* next active devices list */
71 u32 aslp; /* ASL sleep time-out */
72 u32 tidx; /* toggle table index */
73 u32 chpd; /* current hotplug enable indicator */
 74 u32 clid; /* current lid state */
75 u32 cdck; /* current docking state */
76 u32 sxsw; /* Sx state resume */
77 u32 evts; /* ASL supported events */
78 u32 cnot; /* current OS notification */
79 u32 nrdy; /* driver status */
80 u8 rsvd2[60];
81} __attribute__((packed));
82
83/* OpRegion mailbox #2: SWSCI */
84struct opregion_swsci {
85 u32 scic; /* SWSCI command|status|data */
86 u32 parm; /* command parameters */
87 u32 dslp; /* driver sleep time-out */
88 u8 rsvd[244];
89} __attribute__((packed));
90
91/* OpRegion mailbox #3: ASLE */
92struct opregion_asle {
93 u32 ardy; /* driver readiness */
94 u32 aslc; /* ASLE interrupt command */
95 u32 tche; /* technology enabled indicator */
96 u32 alsi; /* current ALS illuminance reading */
97 u32 bclp; /* backlight brightness to set */
98 u32 pfit; /* panel fitting state */
99 u32 cblv; /* current brightness level */
100 u16 bclm[20]; /* backlight level duty cycle mapping table */
101 u32 cpfm; /* current panel fitting mode */
102 u32 epfm; /* enabled panel fitting modes */
103 u8 plut[74]; /* panel LUT and identifier */
104 u32 pfmb; /* PWM freq and min brightness */
105 u8 rsvd[102];
106} __attribute__((packed));
107
108/* ASLE irq request bits */
109#define ASLE_SET_ALS_ILLUM (1 << 0)
110#define ASLE_SET_BACKLIGHT (1 << 1)
111#define ASLE_SET_PFIT (1 << 2)
112#define ASLE_SET_PWM_FREQ (1 << 3)
113#define ASLE_REQ_MSK 0xf
114
115/* response bits of ASLE irq request */
116#define ASLE_ALS_ILLUM_FAIL (2<<10)
117#define ASLE_BACKLIGHT_FAIL (2<<12)
118#define ASLE_PFIT_FAIL (2<<14)
119#define ASLE_PWM_FREQ_FAIL (2<<16)
120
121/* ASLE backlight brightness to set */
122#define ASLE_BCLP_VALID (1<<31)
123#define ASLE_BCLP_MSK (~(1<<31))
124
125/* ASLE panel fitting request */
126#define ASLE_PFIT_VALID (1<<31)
127#define ASLE_PFIT_CENTER (1<<0)
128#define ASLE_PFIT_STRETCH_TEXT (1<<1)
129#define ASLE_PFIT_STRETCH_GFX (1<<2)
130
131/* PWM frequency and minimum brightness */
132#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
133#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
134#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
135#define ASLE_PFMB_PWM_VALID (1<<31)
136
137#define ASLE_CBLV_VALID (1<<31)
138
139static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
140{
141 struct drm_i915_private *dev_priv = dev->dev_private;
142 struct opregion_asle *asle = dev_priv->opregion.asle;
143 u32 blc_pwm_ctl, blc_pwm_ctl2;
144
145 if (!(bclp & ASLE_BCLP_VALID))
146 return ASLE_BACKLIGHT_FAIL;
147
148 bclp &= ASLE_BCLP_MSK;
149	if (bclp > 255)
150 return ASLE_BACKLIGHT_FAIL;
151
152 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
153 blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
154 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
155
156 if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
157 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
158 else
159 I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
160
161 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
162
163 return 0;
164}
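/*
 * Worked example (editor's addition, not in the original source): for a
 * request of bclp = 128 on the non-combination-mode path, the duty cycle
 * programmed into BLC_PWM_CTL is (128 * 0x101) - 1 = 0x807f, and the
 * level reported back is cblv = (128 * 0x64) / 0xff = 50 (percent),
 * OR'd with ASLE_CBLV_VALID.
 */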
165
166static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
167{
168 /* alsi is the current ALS reading in lux. 0 indicates below sensor
169 range, 0xffff indicates above sensor range. 1-0xfffe are valid */
170 return 0;
171}
172
173static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
174{
175 struct drm_i915_private *dev_priv = dev->dev_private;
176 if (pfmb & ASLE_PFMB_PWM_VALID) {
177 u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
178 u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
179 blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
180 pwm = pwm >> 9;
181 /* FIXME - what do we do with the PWM? */
182 }
183 return 0;
184}
185
186static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
187{
188 /* Panel fitting is currently controlled by the X code, so this is a
189 noop until modesetting support works fully */
190 if (!(pfit & ASLE_PFIT_VALID))
191 return ASLE_PFIT_FAIL;
192 return 0;
193}
194
195void opregion_asle_intr(struct drm_device *dev)
196{
197 struct drm_i915_private *dev_priv = dev->dev_private;
198 struct opregion_asle *asle = dev_priv->opregion.asle;
199 u32 asle_stat = 0;
200 u32 asle_req;
201
202 if (!asle)
203 return;
204
205 asle_req = asle->aslc & ASLE_REQ_MSK;
206
207 if (!asle_req) {
208		DRM_DEBUG("ASLE interrupt with no request bits set\n");
209 return;
210 }
211
212 if (asle_req & ASLE_SET_ALS_ILLUM)
213 asle_stat |= asle_set_als_illum(dev, asle->alsi);
214
215 if (asle_req & ASLE_SET_BACKLIGHT)
216 asle_stat |= asle_set_backlight(dev, asle->bclp);
217
218 if (asle_req & ASLE_SET_PFIT)
219 asle_stat |= asle_set_pfit(dev, asle->pfit);
220
221 if (asle_req & ASLE_SET_PWM_FREQ)
222 asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
223
224 asle->aslc = asle_stat;
225}
226
227#define ASLE_ALS_EN (1<<0)
228#define ASLE_BLC_EN (1<<1)
229#define ASLE_PFIT_EN (1<<2)
230#define ASLE_PFMB_EN (1<<3)
231
232void opregion_enable_asle(struct drm_device *dev)
233{
234 struct drm_i915_private *dev_priv = dev->dev_private;
235 struct opregion_asle *asle = dev_priv->opregion.asle;
236
237 if (asle) {
238 u32 pipeb_stats = I915_READ(PIPEBSTAT);
239 if (IS_MOBILE(dev)) {
240 /* Many devices trigger events with a write to the
241 legacy backlight controller, so we need to ensure
242 that it's able to generate interrupts */
243 I915_WRITE(PIPEBSTAT, pipeb_stats |=
244 I915_LEGACY_BLC_EVENT_ENABLE);
245 i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT |
246 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
247 } else
248 i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT);
249
250 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
251 ASLE_PFMB_EN;
252 asle->ardy = 1;
253 }
254}
255
256#define ACPI_EV_DISPLAY_SWITCH (1<<0)
257#define ACPI_EV_LID (1<<1)
258#define ACPI_EV_DOCK (1<<2)
259
260static struct intel_opregion *system_opregion;
261
262int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
263 void *data)
264{
265 /* The only video events relevant to opregion are 0x80. These indicate
266 either a docking event, lid switch or display switch request. In
267 Linux, these are handled by the dock, button and video drivers.
268	   We might want to make the video driver opregion-aware in the
269	   future, but right now we just indicate to the firmware that the
270 request has been handled */
271
272 struct opregion_acpi *acpi;
273
274 if (!system_opregion)
275 return NOTIFY_DONE;
276
277 acpi = system_opregion->acpi;
278 acpi->csts = 0;
279
280 return NOTIFY_OK;
281}
282
283static struct notifier_block intel_opregion_notifier = {
284 .notifier_call = intel_opregion_video_event,
285};
286
287int intel_opregion_init(struct drm_device *dev)
288{
289 struct drm_i915_private *dev_priv = dev->dev_private;
290 struct intel_opregion *opregion = &dev_priv->opregion;
291 void *base;
292 u32 asls, mboxes;
293 int err = 0;
294
295 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
296 DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
297 if (asls == 0) {
298 DRM_DEBUG("ACPI OpRegion not supported!\n");
299 return -ENOTSUPP;
300 }
301
302 base = ioremap(asls, OPREGION_SZ);
303 if (!base)
304 return -ENOMEM;
305
306 opregion->header = base;
307 if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
308 DRM_DEBUG("opregion signature mismatch\n");
309 err = -EINVAL;
310 goto err_out;
311 }
312
313 mboxes = opregion->header->mboxes;
314 if (mboxes & MBOX_ACPI) {
315 DRM_DEBUG("Public ACPI methods supported\n");
316 opregion->acpi = base + OPREGION_ACPI_OFFSET;
317 } else {
318 DRM_DEBUG("Public ACPI methods not supported\n");
319 err = -ENOTSUPP;
320 goto err_out;
321 }
322 opregion->enabled = 1;
323
324 if (mboxes & MBOX_SWSCI) {
325 DRM_DEBUG("SWSCI supported\n");
326 opregion->swsci = base + OPREGION_SWSCI_OFFSET;
327 }
328 if (mboxes & MBOX_ASLE) {
329 DRM_DEBUG("ASLE supported\n");
330 opregion->asle = base + OPREGION_ASLE_OFFSET;
331 }
332
333	/* Notify the BIOS that we are ready to handle ACPI video extension notifications.
334 * Right now, all the events are handled by the ACPI video module.
335 * We don't actually need to do anything with them. */
336 opregion->acpi->csts = 0;
337 opregion->acpi->drdy = 1;
338
339 system_opregion = opregion;
340 register_acpi_notifier(&intel_opregion_notifier);
341
342 return 0;
343
344err_out:
345 iounmap(opregion->header);
346 opregion->header = NULL;
347 return err;
348}
349
350void intel_opregion_free(struct drm_device *dev)
351{
352 struct drm_i915_private *dev_priv = dev->dev_private;
353 struct intel_opregion *opregion = &dev_priv->opregion;
354
355 if (!opregion->enabled)
356 return;
357
358 opregion->acpi->drdy = 0;
359
360 system_opregion = NULL;
361 unregister_acpi_notifier(&intel_opregion_notifier);
362
363 /* just clear all opregion memory pointers now */
364 iounmap(opregion->header);
365 opregion->header = NULL;
366 opregion->acpi = NULL;
367 opregion->swsci = NULL;
368 opregion->asle = NULL;
369
370 opregion->enabled = 0;
371}
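/*
 * Editor's usage sketch (hypothetical call sites, not part of this
 * commit): the expected pairing from the i915 load/unload paths would
 * look roughly like the following, treating a missing OpRegion as
 * non-fatal:
 *
 *	if (intel_opregion_init(dev))
 *		DRM_DEBUG("OpRegion unavailable, continuing without it\n");
 *	...
 *	intel_opregion_free(dev);
 */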
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
new file mode 100644
index 000000000000..5c2d9f206d05
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -0,0 +1,1417 @@
1/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
2 * All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _I915_REG_H_
26#define _I915_REG_H_
27
28/*
29 * The Bridge device's PCI config space has information about the
30 * fb aperture size and the amount of pre-reserved memory.
31 */
32#define INTEL_GMCH_CTRL 0x52
33#define INTEL_GMCH_ENABLED 0x4
34#define INTEL_GMCH_MEM_MASK 0x1
35#define INTEL_GMCH_MEM_64M 0x1
36#define INTEL_GMCH_MEM_128M 0
37
38#define INTEL_855_GMCH_GMS_MASK (0x7 << 4)
39#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
40#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
41#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
42#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
43#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
44#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
45
46#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
47#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
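/*
 * Editor's example (illustrative; bridge_pdev and stolen_size are
 * hypothetical names): decoding the 855 graphics-mode-select field from
 * a bridge-device config read might look like
 *
 *	u16 gmch_ctrl;
 *	pci_read_config_word(bridge_pdev, INTEL_GMCH_CTRL, &gmch_ctrl);
 *	if ((gmch_ctrl & INTEL_855_GMCH_GMS_MASK) ==
 *	    INTEL_855_GMCH_GMS_STOLEN_8M)
 *		stolen_size = 8 * 1024 * 1024;
 */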
48
49/* PCI config space */
50
51#define HPLLCC 0xc0 /* 855 only */
52#define GC_CLOCK_CONTROL_MASK (3 << 0)
53#define GC_CLOCK_133_200 (0 << 0)
54#define GC_CLOCK_100_200 (1 << 0)
55#define GC_CLOCK_100_133 (2 << 0)
56#define GC_CLOCK_166_250 (3 << 0)
57#define GCFGC 0xf0 /* 915+ only */
58#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
59#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
60#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
61#define GC_DISPLAY_CLOCK_MASK (7 << 4)
62#define LBB 0xf4
63
64/* VGA stuff */
65
66#define VGA_ST01_MDA 0x3ba
67#define VGA_ST01_CGA 0x3da
68
69#define VGA_MSR_WRITE 0x3c2
70#define VGA_MSR_READ 0x3cc
71#define VGA_MSR_MEM_EN (1<<1)
72#define VGA_MSR_CGA_MODE (1<<0)
73
74#define VGA_SR_INDEX 0x3c4
75#define VGA_SR_DATA 0x3c5
76
77#define VGA_AR_INDEX 0x3c0
78#define VGA_AR_VID_EN (1<<5)
79#define VGA_AR_DATA_WRITE 0x3c0
80#define VGA_AR_DATA_READ 0x3c1
81
82#define VGA_GR_INDEX 0x3ce
83#define VGA_GR_DATA 0x3cf
84/* GR05 */
85#define VGA_GR_MEM_READ_MODE_SHIFT 3
86#define VGA_GR_MEM_READ_MODE_PLANE 1
87/* GR06 */
88#define VGA_GR_MEM_MODE_MASK 0xc
89#define VGA_GR_MEM_MODE_SHIFT 2
90#define VGA_GR_MEM_A0000_AFFFF 0
91#define VGA_GR_MEM_A0000_BFFFF 1
92#define VGA_GR_MEM_B0000_B7FFF 2
93#define VGA_GR_MEM_B0000_BFFFF 3
94
95#define VGA_DACMASK 0x3c6
96#define VGA_DACRX 0x3c7
97#define VGA_DACWX 0x3c8
98#define VGA_DACDATA 0x3c9
99
100#define VGA_CR_INDEX_MDA 0x3b4
101#define VGA_CR_DATA_MDA 0x3b5
102#define VGA_CR_INDEX_CGA 0x3d4
103#define VGA_CR_DATA_CGA 0x3d5
104
105/*
106 * Memory interface instructions used by the kernel
107 */
108#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
109
110#define MI_NOOP MI_INSTR(0, 0)
111#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
112#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
113#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
114#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
115#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
116#define MI_FLUSH MI_INSTR(0x04, 0)
117#define MI_READ_FLUSH (1 << 0)
118#define MI_EXE_FLUSH (1 << 1)
119#define MI_NO_WRITE_FLUSH (1 << 2)
120#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
121#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
122#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
123#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
124#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
125#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
126#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
127#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
128#define MI_STORE_DWORD_INDEX_SHIFT 2
129#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
130#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
131#define MI_BATCH_NON_SECURE (1)
132#define MI_BATCH_NON_SECURE_I965 (1<<8)
133#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
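/*
 * Editor's note (not in the original header): MI_INSTR packs the opcode
 * into bits 28:23, so for example
 *	MI_STORE_DWORD_INDEX = MI_INSTR(0x21, 1)
 *			     = (0x21 << 23) | 1 = 0x10800001
 */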
134
135/*
136 * 3D instructions used by the kernel
137 */
138#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
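/*
 * Editor's note: the 3D opcodes below are spelled out literally rather
 * than via GFX_INSTR; e.g. GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) is
 * exactly what GFX_INSTR(0x7, 0) would expand to.
 */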
139
140#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
141#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
142#define SC_UPDATE_SCISSOR (0x1<<1)
143#define SC_ENABLE_MASK (0x1<<0)
144#define SC_ENABLE (0x1<<0)
145#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
146#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
147#define SCI_YMIN_MASK (0xffff<<16)
148#define SCI_XMIN_MASK (0xffff<<0)
149#define SCI_YMAX_MASK (0xffff<<16)
150#define SCI_XMAX_MASK (0xffff<<0)
151#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
152#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
153#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
154#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
155#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
156#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
157#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
158#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
159#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
160#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
161#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
162#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
163#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
164#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
165#define BLT_DEPTH_8 (0<<24)
166#define BLT_DEPTH_16_565 (1<<24)
167#define BLT_DEPTH_16_1555 (2<<24)
168#define BLT_DEPTH_32 (3<<24)
169#define BLT_ROP_GXCOPY (0xcc<<16)
170#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
171#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
172#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
173#define ASYNC_FLIP (1<<22)
174#define DISPLAY_PLANE_A (0<<20)
175#define DISPLAY_PLANE_B (1<<20)
176
177/*
178 * Instruction and interrupt control regs
179 */
180
181#define PRB0_TAIL 0x02030
182#define PRB0_HEAD 0x02034
183#define PRB0_START 0x02038
184#define PRB0_CTL 0x0203c
185#define TAIL_ADDR 0x001FFFF8
186#define HEAD_WRAP_COUNT 0xFFE00000
187#define HEAD_WRAP_ONE 0x00200000
188#define HEAD_ADDR 0x001FFFFC
189#define RING_NR_PAGES 0x001FF000
190#define RING_REPORT_MASK 0x00000006
191#define RING_REPORT_64K 0x00000002
192#define RING_REPORT_128K 0x00000004
193#define RING_NO_REPORT 0x00000000
194#define RING_VALID_MASK 0x00000001
195#define RING_VALID 0x00000001
196#define RING_INVALID 0x00000000
197#define PRB1_TAIL 0x02040 /* 915+ only */
198#define PRB1_HEAD 0x02044 /* 915+ only */
199#define PRB1_START 0x02048 /* 915+ only */
200#define PRB1_CTL 0x0204c /* 915+ only */
201#define ACTHD_I965 0x02074
202#define HWS_PGA 0x02080
203#define HWS_ADDRESS_MASK 0xfffff000
204#define HWS_START_ADDRESS_SHIFT 4
205#define IPEIR 0x02088
206#define NOPID 0x02094
207#define HWSTAM 0x02098
208#define SCPD0 0x0209c /* 915+ only */
209#define IER 0x020a0
210#define IIR 0x020a4
211#define IMR 0x020a8
212#define ISR 0x020ac
213#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
214#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
215#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
216#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
217#define I915_HWB_OOM_INTERRUPT (1<<13)
218#define I915_SYNC_STATUS_INTERRUPT (1<<12)
219#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
220#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
221#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
222#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
223#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
224#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
225#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
226#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
227#define I915_DEBUG_INTERRUPT (1<<2)
228#define I915_USER_INTERRUPT (1<<1)
229#define I915_ASLE_INTERRUPT (1<<0)
230#define EIR 0x020b0
231#define EMR 0x020b4
232#define ESR 0x020b8
233#define INSTPM 0x020c0
234#define ACTHD 0x020c8
235#define FW_BLC 0x020d8
236#define FW_BLC_SELF 0x020e0 /* 915+ only */
237#define MI_ARB_STATE 0x020e4 /* 915+ only */
238#define CACHE_MODE_0 0x02120 /* 915+ only */
239#define CM0_MASK_SHIFT 16
240#define CM0_IZ_OPT_DISABLE (1<<6)
241#define CM0_ZR_OPT_DISABLE (1<<5)
242#define CM0_DEPTH_EVICT_DISABLE (1<<4)
243#define CM0_COLOR_EVICT_DISABLE (1<<3)
244#define CM0_DEPTH_WRITE_DISABLE (1<<1)
245#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
246#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
247
248/*
249 * Framebuffer compression (915+ only)
250 */
251
252#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
253#define FBC_LL_BASE 0x03204 /* 4k page aligned */
254#define FBC_CONTROL 0x03208
255#define FBC_CTL_EN (1<<31)
256#define FBC_CTL_PERIODIC (1<<30)
257#define FBC_CTL_INTERVAL_SHIFT (16)
258#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
259#define FBC_CTL_STRIDE_SHIFT (5)
260#define FBC_CTL_FENCENO (1<<0)
261#define FBC_COMMAND 0x0320c
262#define FBC_CMD_COMPRESS (1<<0)
263#define FBC_STATUS 0x03210
264#define FBC_STAT_COMPRESSING (1<<31)
265#define FBC_STAT_COMPRESSED (1<<30)
266#define FBC_STAT_MODIFIED (1<<29)
267#define FBC_STAT_CURRENT_LINE (1<<0)
268#define FBC_CONTROL2 0x03214
269#define FBC_CTL_FENCE_DBL (0<<4)
270#define FBC_CTL_IDLE_IMM (0<<2)
271#define FBC_CTL_IDLE_FULL (1<<2)
272#define FBC_CTL_IDLE_LINE (2<<2)
273#define FBC_CTL_IDLE_DEBUG (3<<2)
274#define FBC_CTL_CPU_FENCE (1<<1)
275#define FBC_CTL_PLANEA (0<<0)
276#define FBC_CTL_PLANEB (1<<0)
277#define FBC_FENCE_OFF 0x0321b
278
279#define FBC_LL_SIZE (1536)
280
281/*
282 * GPIO regs
283 */
284#define GPIOA 0x5010
285#define GPIOB 0x5014
286#define GPIOC 0x5018
287#define GPIOD 0x501c
288#define GPIOE 0x5020
289#define GPIOF 0x5024
290#define GPIOG 0x5028
291#define GPIOH 0x502c
292# define GPIO_CLOCK_DIR_MASK (1 << 0)
293# define GPIO_CLOCK_DIR_IN (0 << 1)
294# define GPIO_CLOCK_DIR_OUT (1 << 1)
295# define GPIO_CLOCK_VAL_MASK (1 << 2)
296# define GPIO_CLOCK_VAL_OUT (1 << 3)
297# define GPIO_CLOCK_VAL_IN (1 << 4)
298# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
299# define GPIO_DATA_DIR_MASK (1 << 8)
300# define GPIO_DATA_DIR_IN (0 << 9)
301# define GPIO_DATA_DIR_OUT (1 << 9)
302# define GPIO_DATA_VAL_MASK (1 << 10)
303# define GPIO_DATA_VAL_OUT (1 << 11)
304# define GPIO_DATA_VAL_IN (1 << 12)
305# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
306
307/*
308 * Clock control & power management
309 */
310
311#define VGA0 0x6000
312#define VGA1 0x6004
313#define VGA_PD 0x6010
314#define VGA0_PD_P2_DIV_4 (1 << 7)
315#define VGA0_PD_P1_DIV_2 (1 << 5)
316#define VGA0_PD_P1_SHIFT 0
317#define VGA0_PD_P1_MASK (0x1f << 0)
318#define VGA1_PD_P2_DIV_4 (1 << 15)
319#define VGA1_PD_P1_DIV_2 (1 << 13)
320#define VGA1_PD_P1_SHIFT 8
321#define VGA1_PD_P1_MASK (0x1f << 8)
322#define DPLL_A 0x06014
323#define DPLL_B 0x06018
324#define DPLL_VCO_ENABLE (1 << 31)
325#define DPLL_DVO_HIGH_SPEED (1 << 30)
326#define DPLL_SYNCLOCK_ENABLE (1 << 29)
327#define DPLL_VGA_MODE_DIS (1 << 28)
328#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
329#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
330#define DPLL_MODE_MASK (3 << 26)
331#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
332#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
333#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
334#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
335#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
336#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
337
338#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
339#define I915_CRC_ERROR_ENABLE (1UL<<29)
340#define I915_CRC_DONE_ENABLE (1UL<<28)
341#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
342#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
343#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
344#define I915_DPST_EVENT_ENABLE (1UL<<23)
345#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
346#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
347#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
348#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
349#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
350#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
351#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
352#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
353#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
354#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
355#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
356#define I915_DPST_EVENT_STATUS (1UL<<7)
357#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
358#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
359#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
360#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
361#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
362#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
363
364#define SRX_INDEX 0x3c4
365#define SRX_DATA 0x3c5
366#define SR01 1
367#define SR01_SCREEN_OFF (1<<5)
368
369#define PPCR 0x61204
370#define PPCR_ON (1<<0)
371
372#define DVOB 0x61140
373#define DVOB_ON (1<<31)
374#define DVOC 0x61160
375#define DVOC_ON (1<<31)
376#define LVDS 0x61180
377#define LVDS_ON (1<<31)
378
379#define ADPA 0x61100
380#define ADPA_DPMS_MASK (~(3<<10))
381#define ADPA_DPMS_ON (0<<10)
382#define ADPA_DPMS_SUSPEND (1<<10)
383#define ADPA_DPMS_STANDBY (2<<10)
384#define ADPA_DPMS_OFF (3<<10)
385
386#define RING_TAIL 0x00
387#define TAIL_ADDR 0x001FFFF8
388#define RING_HEAD 0x04
389#define HEAD_WRAP_COUNT 0xFFE00000
390#define HEAD_WRAP_ONE 0x00200000
391#define HEAD_ADDR 0x001FFFFC
392#define RING_START 0x08
393#define START_ADDR 0xFFFFF000
394#define RING_LEN 0x0C
395#define RING_NR_PAGES 0x001FF000
396#define RING_REPORT_MASK 0x00000006
397#define RING_REPORT_64K 0x00000002
398#define RING_REPORT_128K 0x00000004
399#define RING_NO_REPORT 0x00000000
400#define RING_VALID_MASK 0x00000001
401#define RING_VALID 0x00000001
402#define RING_INVALID 0x00000000
403
404/* Scratch pad debug 0 reg:
405 */
406#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
407/*
408 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
409 * this field (only one bit may be set).
410 */
411#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
412#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
413/* i830, required in DVO non-gang */
414#define PLL_P2_DIVIDE_BY_4 (1 << 23)
415#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
416#define PLL_REF_INPUT_DREFCLK (0 << 13)
417#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
418#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
419#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
420#define PLL_REF_INPUT_MASK (3 << 13)
421#define PLL_LOAD_PULSE_PHASE_SHIFT 9
422/*
423 * Parallel to Serial Load Pulse phase selection.
424 * Selects the phase for the 10X DPLL clock for the PCIe
425 * digital display port. The range is 4 to 13; 10 or more
426 * is just a flip delay. The default is 6.
427 */
428#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
429#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
430/*
431 * SDVO multiplier for 945G/GM. Not used on 965.
432 */
433#define SDVO_MULTIPLIER_MASK 0x000000ff
434#define SDVO_MULTIPLIER_SHIFT_HIRES 4
435#define SDVO_MULTIPLIER_SHIFT_VGA 0
436#define DPLL_A_MD 0x0601c /* 965+ only */
437/*
438 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
439 *
440 * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
441 */
442#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
443#define DPLL_MD_UDI_DIVIDER_SHIFT 24
444/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
445#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
446#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
447/*
448 * SDVO/UDI pixel multiplier.
449 *
450 * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
451 * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
452 * modes, the bus rate would be below the limits, so SDVO allows for stuffing
453 * dummy bytes in the datastream at an increased clock rate, with both sides of
454 * the link knowing how many bytes are fill.
455 * the link knowing how many bytes are filler.
456 * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
457 * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
458 * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
459 * through an SDVO command.
460 *
461 * This register field has values of multiplication factor minus 1, with
462 * a maximum multiplier of 5 for SDVO.
463 */
464#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
465#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
466/*
467 * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
468 * This should be set to the default value (3) or the CRT won't work. No,
469 * I don't entirely understand what this does...
470 */
471#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
472#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
473#define DPLL_B_MD 0x06020 /* 965+ only */
474#define FPA0 0x06040
475#define FPA1 0x06044
476#define FPB0 0x06048
477#define FPB1 0x0604c
478#define FP_N_DIV_MASK 0x003f0000
479#define FP_N_DIV_SHIFT 16
480#define FP_M1_DIV_MASK 0x00003f00
481#define FP_M1_DIV_SHIFT 8
482#define FP_M2_DIV_MASK 0x0000003f
483#define FP_M2_DIV_SHIFT 0
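/*
 * Editor's note (hedged: the exact formula is generation-specific and is
 * not stated in this header): on i9xx parts these N/M1/M2 fields
 * conventionally feed a divider calculation along the lines of
 *	M = 5 * (M1 + 2) + (M2 + 2),  VCO = refclk * M / (N + 2)
 * with the P post-dividers selected through the DPLL registers above.
 */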
484#define DPLL_TEST 0x606c
485#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
486#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
487#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
488#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
489#define DPLLB_TEST_N_BYPASS (1 << 19)
490#define DPLLB_TEST_M_BYPASS (1 << 18)
491#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
492#define DPLLA_TEST_N_BYPASS (1 << 3)
493#define DPLLA_TEST_M_BYPASS (1 << 2)
494#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
495#define D_STATE 0x6104
496#define CG_2D_DIS 0x6200
497#define CG_3D_DIS 0x6204
498
499/*
500 * Palette regs
501 */
502
503#define PALETTE_A 0x0a000
504#define PALETTE_B 0x0a800
505
506/* MCH MMIO space */
507
508/*
509 * MCHBAR mirror.
510 *
511 * This mirrors the MCHBAR MMIO space whose location is determined by
512 * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
513 * every way. It is not accessible from the CP register read instructions.
514 *
515 */
516#define MCHBAR_MIRROR_BASE 0x10000
517
518/** 915-945 and GM965 MCH register controlling DRAM channel access */
519#define DCC 0x10200
520#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
521#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
522#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
523#define DCC_ADDRESSING_MODE_MASK (3 << 0)
524#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
525
526/** 965 MCH register controlling DRAM channel configuration */
527#define C0DRB3 0x10206
528#define C1DRB3 0x10606
529
530/*
531 * Overlay regs
532 */
533
534#define OVADD 0x30000
535#define DOVSTA 0x30008
536#define OC_BUF (0x3<<20)
537#define OGAMC5 0x30010
538#define OGAMC4 0x30014
539#define OGAMC3 0x30018
540#define OGAMC2 0x3001c
541#define OGAMC1 0x30020
542#define OGAMC0 0x30024
543
544/*
545 * Display engine regs
546 */
547
548/* Pipe A timing regs */
549#define HTOTAL_A 0x60000
550#define HBLANK_A 0x60004
551#define HSYNC_A 0x60008
552#define VTOTAL_A 0x6000c
553#define VBLANK_A 0x60010
554#define VSYNC_A 0x60014
555#define PIPEASRC 0x6001c
556#define BCLRPAT_A 0x60020
557
558/* Pipe B timing regs */
559#define HTOTAL_B 0x61000
560#define HBLANK_B 0x61004
561#define HSYNC_B 0x61008
562#define VTOTAL_B 0x6100c
563#define VBLANK_B 0x61010
564#define VSYNC_B 0x61014
565#define PIPEBSRC 0x6101c
566#define BCLRPAT_B 0x61020
567
568/* VGA port control */
569#define ADPA 0x61100
570#define ADPA_DAC_ENABLE (1<<31)
571#define ADPA_DAC_DISABLE 0
572#define ADPA_PIPE_SELECT_MASK (1<<30)
573#define ADPA_PIPE_A_SELECT 0
574#define ADPA_PIPE_B_SELECT (1<<30)
575#define ADPA_USE_VGA_HVPOLARITY (1<<15)
576#define ADPA_SETS_HVPOLARITY 0
577#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
578#define ADPA_VSYNC_CNTL_ENABLE 0
579#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
580#define ADPA_HSYNC_CNTL_ENABLE 0
581#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
582#define ADPA_VSYNC_ACTIVE_LOW 0
583#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
584#define ADPA_HSYNC_ACTIVE_LOW 0
585#define ADPA_DPMS_MASK (~(3<<10))
586#define ADPA_DPMS_ON (0<<10)
587#define ADPA_DPMS_SUSPEND (1<<10)
588#define ADPA_DPMS_STANDBY (2<<10)
589#define ADPA_DPMS_OFF (3<<10)
590
591/* Hotplug control (945+ only) */
592#define PORT_HOTPLUG_EN 0x61110
593#define SDVOB_HOTPLUG_INT_EN (1 << 26)
594#define SDVOC_HOTPLUG_INT_EN (1 << 25)
595#define TV_HOTPLUG_INT_EN (1 << 18)
596#define CRT_HOTPLUG_INT_EN (1 << 9)
597#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
598
599#define PORT_HOTPLUG_STAT 0x61114
600#define CRT_HOTPLUG_INT_STATUS (1 << 11)
601#define TV_HOTPLUG_INT_STATUS (1 << 10)
602#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
603#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
604#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
605#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
606#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
607#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
608
609/* SDVO port control */
610#define SDVOB 0x61140
611#define SDVOC 0x61160
612#define SDVO_ENABLE (1 << 31)
613#define SDVO_PIPE_B_SELECT (1 << 30)
614#define SDVO_STALL_SELECT (1 << 29)
615#define SDVO_INTERRUPT_ENABLE (1 << 26)
616/**
617 * 915G/GM SDVO pixel multiplier.
618 *
619 * Programmed value is multiplier - 1, up to 5x.
620 *
621 * \sa DPLL_MD_UDI_MULTIPLIER_MASK
622 */
623#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
624#define SDVO_PORT_MULTIPLY_SHIFT 23
625#define SDVO_PHASE_SELECT_MASK (15 << 19)
626#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
627#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
628#define SDVOC_GANG_MODE (1 << 16)
629#define SDVO_BORDER_ENABLE (1 << 7)
630#define SDVOB_PCIE_CONCURRENCY (1 << 3)
631#define SDVO_DETECTED (1 << 2)
632/* Bits to be preserved when writing */
633#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
634#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
635
636/* DVO port control */
637#define DVOA 0x61120
638#define DVOB 0x61140
639#define DVOC 0x61160
640#define DVO_ENABLE (1 << 31)
641#define DVO_PIPE_B_SELECT (1 << 30)
642#define DVO_PIPE_STALL_UNUSED (0 << 28)
643#define DVO_PIPE_STALL (1 << 28)
644#define DVO_PIPE_STALL_TV (2 << 28)
645#define DVO_PIPE_STALL_MASK (3 << 28)
646#define DVO_USE_VGA_SYNC (1 << 15)
647#define DVO_DATA_ORDER_I740 (0 << 14)
648#define DVO_DATA_ORDER_FP (1 << 14)
649#define DVO_VSYNC_DISABLE (1 << 11)
650#define DVO_HSYNC_DISABLE (1 << 10)
651#define DVO_VSYNC_TRISTATE (1 << 9)
652#define DVO_HSYNC_TRISTATE (1 << 8)
653#define DVO_BORDER_ENABLE (1 << 7)
654#define DVO_DATA_ORDER_GBRG (1 << 6)
655#define DVO_DATA_ORDER_RGGB (0 << 6)
656#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6)
657#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6)
658#define DVO_VSYNC_ACTIVE_HIGH (1 << 4)
659#define DVO_HSYNC_ACTIVE_HIGH (1 << 3)
660#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
661#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
662#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
663#define DVO_PRESERVE_MASK (0x7<<24)
664#define DVOA_SRCDIM 0x61124
665#define DVOB_SRCDIM 0x61144
666#define DVOC_SRCDIM 0x61164
667#define DVO_SRCDIM_HORIZONTAL_SHIFT 12
668#define DVO_SRCDIM_VERTICAL_SHIFT 0
669
670/* LVDS port control */
671#define LVDS 0x61180
672/*
673 * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
674 * the DPLL semantics change when the LVDS is assigned to that pipe.
675 */
676#define LVDS_PORT_EN (1 << 31)
677/* Selects pipe B for LVDS data. Must be set on pre-965. */
678#define LVDS_PIPEB_SELECT (1 << 30)
679/*
680 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
681 * pixel.
682 */
683#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
684#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
685#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
686/*
687 * Controls the A3 data pair, which contains the additional LSBs for 24 bit
688 * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
689 * on.
690 */
691#define LVDS_A3_POWER_MASK (3 << 6)
692#define LVDS_A3_POWER_DOWN (0 << 6)
693#define LVDS_A3_POWER_UP (3 << 6)
694/*
695 * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
696 * is set.
697 */
698#define LVDS_CLKB_POWER_MASK (3 << 4)
699#define LVDS_CLKB_POWER_DOWN (0 << 4)
700#define LVDS_CLKB_POWER_UP (3 << 4)
701/*
702 * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
703 * setting for whether we are in dual-channel mode. The B3 pair will
704 * additionally only be powered up when LVDS_A3_POWER_UP is set.
705 */
706#define LVDS_B0B3_POWER_MASK (3 << 2)
707#define LVDS_B0B3_POWER_DOWN (0 << 2)
708#define LVDS_B0B3_POWER_UP (3 << 2)
709
710/* Panel power sequencing */
711#define PP_STATUS 0x61200
712#define PP_ON (1 << 31)
713/*
714 * Indicates that all dependencies of the panel are on:
715 *
716 * - PLL enabled
717 * - pipe enabled
718 * - LVDS/DVOB/DVOC on
719 */
720#define PP_READY (1 << 30)
721#define PP_SEQUENCE_NONE (0 << 28)
722#define PP_SEQUENCE_ON (1 << 28)
723#define PP_SEQUENCE_OFF (2 << 28)
724#define PP_SEQUENCE_MASK 0x30000000
725#define PP_CONTROL 0x61204
726#define POWER_TARGET_ON (1 << 0)
727#define PP_ON_DELAYS 0x61208
728#define PP_OFF_DELAYS 0x6120c
729#define PP_DIVISOR 0x61210
730
731/* Panel fitting */
732#define PFIT_CONTROL 0x61230
733#define PFIT_ENABLE (1 << 31)
734#define PFIT_PIPE_MASK (3 << 29)
735#define PFIT_PIPE_SHIFT 29
736#define VERT_INTERP_DISABLE (0 << 10)
737#define VERT_INTERP_BILINEAR (1 << 10)
738#define VERT_INTERP_MASK (3 << 10)
739#define VERT_AUTO_SCALE (1 << 9)
740#define HORIZ_INTERP_DISABLE (0 << 6)
741#define HORIZ_INTERP_BILINEAR (1 << 6)
742#define HORIZ_INTERP_MASK (3 << 6)
743#define HORIZ_AUTO_SCALE (1 << 5)
744#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
745#define PFIT_PGM_RATIOS 0x61234
746#define PFIT_VERT_SCALE_MASK 0xfff00000
747#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
748#define PFIT_AUTO_RATIOS 0x61238
749
750/* Backlight control */
751#define BLC_PWM_CTL 0x61254
752#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
753#define BLC_PWM_CTL2 0x61250 /* 965+ only */
754#define BLM_COMBINATION_MODE (1 << 30)
755/*
756 * This is the most significant 15 bits of the number of backlight cycles in a
757 * complete cycle of the modulated backlight control.
758 *
759 * The actual value is this field multiplied by two.
760 */
761#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
762#define BLM_LEGACY_MODE (1 << 16)
763/*
764 * This is the number of cycles out of the backlight modulation cycle for which
765 * the backlight is on.
766 *
767 * This field must be no greater than the number of cycles in the complete
768 * backlight modulation cycle.
769 */
770#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
771#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
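/*
 * Editor's example (illustrative numbers): a modulation-frequency field
 * of 0x1000 means a full PWM period of 2 * 0x1000 = 0x2000 cycles, so a
 * duty-cycle value of 0x1000 in the low 16 bits gives a 50% backlight.
 */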
772
773/* TV port control */
774#define TV_CTL 0x68000
775/** Enables the TV encoder */
776# define TV_ENC_ENABLE (1 << 31)
777/** Sources the TV encoder input from pipe B instead of A. */
778# define TV_ENC_PIPEB_SELECT (1 << 30)
779/** Outputs composite video (DAC A only) */
780# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
781/** Outputs SVideo video (DAC B/C) */
782# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
783/** Outputs Component video (DAC A/B/C) */
784# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
785/** Outputs Composite and SVideo (DAC A/B/C) */
786# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
787# define TV_TRILEVEL_SYNC (1 << 21)
788/** Enables slow sync generation (945GM only) */
789# define TV_SLOW_SYNC (1 << 20)
790/** Selects 4x oversampling for 480i and 576p */
791# define TV_OVERSAMPLE_4X (0 << 18)
792/** Selects 2x oversampling for 720p and 1080i */
793# define TV_OVERSAMPLE_2X (1 << 18)
794/** Selects no oversampling for 1080p */
795# define TV_OVERSAMPLE_NONE (2 << 18)
796/** Selects 8x oversampling */
797# define TV_OVERSAMPLE_8X (3 << 18)
798/** Selects progressive mode rather than interlaced */
799# define TV_PROGRESSIVE (1 << 17)
800/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */
801# define TV_PAL_BURST (1 << 16)
802/** Field for setting delay of Y compared to C */
803# define TV_YC_SKEW_MASK (7 << 12)
804/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
805# define TV_ENC_SDP_FIX (1 << 11)
806/**
807 * Enables a fix for the 915GM only.
808 *
809 * Not sure what it does.
810 */
811# define TV_ENC_C0_FIX (1 << 10)
812/** Bits that must be preserved by software */
813# define TV_CTL_SAVE ((3 << 8) | (3 << 6))
814# define TV_FUSE_STATE_MASK (3 << 4)
815/** Read-only state that reports all features enabled */
816# define TV_FUSE_STATE_ENABLED (0 << 4)
817/** Read-only state that reports that Macrovision is disabled in hardware*/
818# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
819/** Read-only state that reports that TV-out is disabled in hardware. */
820# define TV_FUSE_STATE_DISABLED (2 << 4)
821/** Normal operation */
822# define TV_TEST_MODE_NORMAL (0 << 0)
823/** Encoder test pattern 1 - combo pattern */
824# define TV_TEST_MODE_PATTERN_1 (1 << 0)
825/** Encoder test pattern 2 - full screen vertical 75% color bars */
826# define TV_TEST_MODE_PATTERN_2 (2 << 0)
827/** Encoder test pattern 3 - full screen horizontal 75% color bars */
828# define TV_TEST_MODE_PATTERN_3 (3 << 0)
829/** Encoder test pattern 4 - random noise */
830# define TV_TEST_MODE_PATTERN_4 (4 << 0)
831/** Encoder test pattern 5 - linear color ramps */
832# define TV_TEST_MODE_PATTERN_5 (5 << 0)
833/**
834 * This test mode forces the DACs to 50% of full output.
835 *
836 * This is used for load detection in combination with TVDAC_SENSE_MASK
837 */
838# define TV_TEST_MODE_MONITOR_DETECT (7 << 0)
839# define TV_TEST_MODE_MASK (7 << 0)
840
841#define TV_DAC 0x68004
842/**
843 * Reports that DAC state change logic has reported change (RO).
844 *
845 * This gets cleared when TVDAC_STATE_CHG_EN is cleared.
846 */
847# define TVDAC_STATE_CHG (1 << 31)
848# define TVDAC_SENSE_MASK (7 << 28)
849/** Reports that DAC A voltage is above the detect threshold */
850# define TVDAC_A_SENSE (1 << 30)
851/** Reports that DAC B voltage is above the detect threshold */
852# define TVDAC_B_SENSE (1 << 29)
853/** Reports that DAC C voltage is above the detect threshold */
854# define TVDAC_C_SENSE (1 << 28)
855/**
856 * Enables DAC state detection logic, for load-based TV detection.
857 *
858 * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
859 * to off, for load detection to work.
860 */
861# define TVDAC_STATE_CHG_EN (1 << 27)
862/** Sets the DAC A sense value to high */
863# define TVDAC_A_SENSE_CTL (1 << 26)
864/** Sets the DAC B sense value to high */
865# define TVDAC_B_SENSE_CTL (1 << 25)
866/** Sets the DAC C sense value to high */
867# define TVDAC_C_SENSE_CTL (1 << 24)
868/** Overrides the ENC_ENABLE and DAC voltage levels */
869# define DAC_CTL_OVERRIDE (1 << 7)
870/** Sets the slew rate. Must be preserved in software */
871# define ENC_TVDAC_SLEW_FAST (1 << 6)
872# define DAC_A_1_3_V (0 << 4)
873# define DAC_A_1_1_V (1 << 4)
874# define DAC_A_0_7_V (2 << 4)
875# define DAC_A_OFF (3 << 4)
876# define DAC_B_1_3_V (0 << 2)
877# define DAC_B_1_1_V (1 << 2)
878# define DAC_B_0_7_V (2 << 2)
879# define DAC_B_OFF (3 << 2)
880# define DAC_C_1_3_V (0 << 0)
881# define DAC_C_1_1_V (1 << 0)
882# define DAC_C_0_7_V (2 << 0)
883# define DAC_C_OFF (3 << 0)
884
885/**
886 * CSC coefficients are stored in a floating point format with 9 bits of
887 * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
888 * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
889 * -1 (0x3) being the only legal negative value.
890 */
891#define TV_CSC_Y 0x68010
892# define TV_RY_MASK 0x07ff0000
893# define TV_RY_SHIFT 16
894# define TV_GY_MASK 0x00000fff
895# define TV_GY_SHIFT 0
896
897#define TV_CSC_Y2 0x68014
898# define TV_BY_MASK 0x07ff0000
899# define TV_BY_SHIFT 16
900/**
901 * Y attenuation for component video.
902 *
903 * Stored in 1.9 fixed point.
904 */
905# define TV_AY_MASK 0x000003ff
906# define TV_AY_SHIFT 0
907
908#define TV_CSC_U 0x68018
909# define TV_RU_MASK 0x07ff0000
910# define TV_RU_SHIFT 16
911# define TV_GU_MASK 0x000007ff
912# define TV_GU_SHIFT 0
913
914#define TV_CSC_U2 0x6801c
915# define TV_BU_MASK 0x07ff0000
916# define TV_BU_SHIFT 16
917/**
918 * U attenuation for component video.
919 *
920 * Stored in 1.9 fixed point.
921 */
922# define TV_AU_MASK 0x000003ff
923# define TV_AU_SHIFT 0
924
925#define TV_CSC_V 0x68020
926# define TV_RV_MASK 0x0fff0000
927# define TV_RV_SHIFT 16
928# define TV_GV_MASK 0x000007ff
929# define TV_GV_SHIFT 0
930
931#define TV_CSC_V2 0x68024
932# define TV_BV_MASK 0x07ff0000
933# define TV_BV_SHIFT 16
934/**
935 * V attenuation for component video.
936 *
937 * Stored in 1.9 fixed point.
938 */
939# define TV_AV_MASK 0x000007ff
940# define TV_AV_SHIFT 0
941
942#define TV_CLR_KNOBS 0x68028
943/** 2s-complement brightness adjustment */
944# define TV_BRIGHTNESS_MASK 0xff000000
945# define TV_BRIGHTNESS_SHIFT 24
946/** Contrast adjustment, as a 2.6 unsigned floating point number */
947# define TV_CONTRAST_MASK 0x00ff0000
948# define TV_CONTRAST_SHIFT 16
949/** Saturation adjustment, as a 2.6 unsigned floating point number */
950# define TV_SATURATION_MASK 0x0000ff00
951# define TV_SATURATION_SHIFT 8
952/** Hue adjustment, as an integer phase angle in degrees */
953# define TV_HUE_MASK 0x000000ff
954# define TV_HUE_SHIFT 0
955
956#define TV_CLR_LEVEL 0x6802c
957/** Controls the DAC level for black */
958# define TV_BLACK_LEVEL_MASK 0x01ff0000
959# define TV_BLACK_LEVEL_SHIFT 16
960/** Controls the DAC level for blanking */
961# define TV_BLANK_LEVEL_MASK 0x000001ff
962# define TV_BLANK_LEVEL_SHIFT 0
963
964#define TV_H_CTL_1 0x68030
965/** Number of pixels in the hsync. */
966# define TV_HSYNC_END_MASK 0x1fff0000
967# define TV_HSYNC_END_SHIFT 16
968/** Total number of pixels minus one in the line (display and blanking). */
969# define TV_HTOTAL_MASK 0x00001fff
970# define TV_HTOTAL_SHIFT 0
971
972#define TV_H_CTL_2 0x68034
973/** Enables the colorburst (needed for non-component color) */
974# define TV_BURST_ENA (1 << 31)
975/** Offset of the colorburst from the start of hsync, in pixels minus one. */
976# define TV_HBURST_START_SHIFT 16
977# define TV_HBURST_START_MASK 0x1fff0000
978/** Length of the colorburst */
979# define TV_HBURST_LEN_SHIFT 0
980# define TV_HBURST_LEN_MASK 0x0001fff
981
982#define TV_H_CTL_3 0x68038
983/** End of hblank, measured in pixels minus one from start of hsync */
984# define TV_HBLANK_END_SHIFT 16
985# define TV_HBLANK_END_MASK 0x1fff0000
986/** Start of hblank, measured in pixels minus one from start of hsync */
987# define TV_HBLANK_START_SHIFT 0
988# define TV_HBLANK_START_MASK 0x0001fff
989
990#define TV_V_CTL_1 0x6803c
991/** XXX */
992# define TV_NBR_END_SHIFT 16
993# define TV_NBR_END_MASK 0x07ff0000
994/** XXX */
995# define TV_VI_END_F1_SHIFT 8
996# define TV_VI_END_F1_MASK 0x00003f00
997/** XXX */
998# define TV_VI_END_F2_SHIFT 0
999# define TV_VI_END_F2_MASK 0x0000003f
1000
1001#define TV_V_CTL_2 0x68040
1002/** Length of vsync, in half lines */
1003# define TV_VSYNC_LEN_MASK 0x07ff0000
1004# define TV_VSYNC_LEN_SHIFT 16
1005/** Offset of the start of vsync in field 1, measured in one less than the
1006 * number of half lines.
1007 */
1008# define TV_VSYNC_START_F1_MASK 0x00007f00
1009# define TV_VSYNC_START_F1_SHIFT 8
1010/**
1011 * Offset of the start of vsync in field 2, measured in one less than the
1012 * number of half lines.
1013 */
1014# define TV_VSYNC_START_F2_MASK 0x0000007f
1015# define TV_VSYNC_START_F2_SHIFT 0
1016
1017#define TV_V_CTL_3 0x68044
1018/** Enables generation of the equalization signal */
1019# define TV_EQUAL_ENA (1 << 31)
1020/** Length of the vertical equalization interval, in half lines */
1021# define TV_VEQ_LEN_MASK 0x007f0000
1022# define TV_VEQ_LEN_SHIFT 16
1023/** Offset of the start of equalization in field 1, measured in one less than
1024 * the number of half lines.
1025 */
1026# define TV_VEQ_START_F1_MASK 0x0007f00
1027# define TV_VEQ_START_F1_SHIFT 8
1028/**
1029 * Offset of the start of equalization in field 2, measured in one less than
1030 * the number of half lines.
1031 */
1032# define TV_VEQ_START_F2_MASK 0x000007f
1033# define TV_VEQ_START_F2_SHIFT 0
1034
1035#define TV_V_CTL_4 0x68048
1036/**
1037 * Offset to start of vertical colorburst, measured in one less than the
1038 * number of lines from vertical start.
1039 */
1040# define TV_VBURST_START_F1_MASK 0x003f0000
1041# define TV_VBURST_START_F1_SHIFT 16
1042/**
1043 * Offset to the end of vertical colorburst, measured in one less than the
1044 * number of lines from the start of NBR.
1045 */
1046# define TV_VBURST_END_F1_MASK 0x000000ff
1047# define TV_VBURST_END_F1_SHIFT 0
1048
1049#define TV_V_CTL_5 0x6804c
1050/**
1051 * Offset to start of vertical colorburst, measured in one less than the
1052 * number of lines from vertical start.
1053 */
1054# define TV_VBURST_START_F2_MASK 0x003f0000
1055# define TV_VBURST_START_F2_SHIFT 16
1056/**
1057 * Offset to the end of vertical colorburst, measured in one less than the
1058 * number of lines from the start of NBR.
1059 */
1060# define TV_VBURST_END_F2_MASK 0x000000ff
1061# define TV_VBURST_END_F2_SHIFT 0
1062
1063#define TV_V_CTL_6 0x68050
1064/**
1065 * Offset to start of vertical colorburst, measured in one less than the
1066 * number of lines from vertical start.
1067 */
1068# define TV_VBURST_START_F3_MASK 0x003f0000
1069# define TV_VBURST_START_F3_SHIFT 16
1070/**
1071 * Offset to the end of vertical colorburst, measured in one less than the
1072 * number of lines from the start of NBR.
1073 */
1074# define TV_VBURST_END_F3_MASK 0x000000ff
1075# define TV_VBURST_END_F3_SHIFT 0
1076
1077#define TV_V_CTL_7 0x68054
1078/**
1079 * Offset to start of vertical colorburst, measured in one less than the
1080 * number of lines from vertical start.
1081 */
1082# define TV_VBURST_START_F4_MASK 0x003f0000
1083# define TV_VBURST_START_F4_SHIFT 16
1084/**
1085 * Offset to the end of vertical colorburst, measured in one less than the
1086 * number of lines from the start of NBR.
1087 */
1088# define TV_VBURST_END_F4_MASK 0x000000ff
1089# define TV_VBURST_END_F4_SHIFT 0
1090
1091#define TV_SC_CTL_1 0x68060
1092/** Turns on the first subcarrier phase generation DDA */
1093# define TV_SC_DDA1_EN (1 << 31)
1094/** Turns on the second subcarrier phase generation DDA */
1095# define TV_SC_DDA2_EN (1 << 30)
1096/** Turns on the third subcarrier phase generation DDA */
1097# define TV_SC_DDA3_EN (1 << 29)
1098/** Sets the subcarrier DDA to reset frequency every other field */
1099# define TV_SC_RESET_EVERY_2 (0 << 24)
1100/** Sets the subcarrier DDA to reset frequency every fourth field */
1101# define TV_SC_RESET_EVERY_4 (1 << 24)
1102/** Sets the subcarrier DDA to reset frequency every eighth field */
1103# define TV_SC_RESET_EVERY_8 (2 << 24)
1104/** Sets the subcarrier DDA to never reset the frequency */
1105# define TV_SC_RESET_NEVER (3 << 24)
1106/** Sets the peak amplitude of the colorburst.*/
1107# define TV_BURST_LEVEL_MASK 0x00ff0000
1108# define TV_BURST_LEVEL_SHIFT 16
1109/** Sets the increment of the first subcarrier phase generation DDA */
1110# define TV_SCDDA1_INC_MASK 0x00000fff
1111# define TV_SCDDA1_INC_SHIFT 0
1112
1113#define TV_SC_CTL_2 0x68064
1114/** Sets the rollover for the second subcarrier phase generation DDA */
1115# define TV_SCDDA2_SIZE_MASK 0x7fff0000
1116# define TV_SCDDA2_SIZE_SHIFT 16
1117/** Sets the increment of the second subcarrier phase generation DDA */
1118# define TV_SCDDA2_INC_MASK 0x00007fff
1119# define TV_SCDDA2_INC_SHIFT 0
1120
1121#define TV_SC_CTL_3 0x68068
1122/** Sets the rollover for the third subcarrier phase generation DDA */
1123# define TV_SCDDA3_SIZE_MASK 0x7fff0000
1124# define TV_SCDDA3_SIZE_SHIFT 16
1125/** Sets the increment of the third subcarrier phase generation DDA */
1126# define TV_SCDDA3_INC_MASK 0x00007fff
1127# define TV_SCDDA3_INC_SHIFT 0
1128
1129#define TV_WIN_POS 0x68070
1130/** X coordinate of the display from the start of horizontal active */
1131# define TV_XPOS_MASK 0x1fff0000
1132# define TV_XPOS_SHIFT 16
1133/** Y coordinate of the display from the start of vertical active (NBR) */
1134# define TV_YPOS_MASK 0x00000fff
1135# define TV_YPOS_SHIFT 0
1136
1137#define TV_WIN_SIZE 0x68074
1138/** Horizontal size of the display window, measured in pixels */
1139# define TV_XSIZE_MASK 0x1fff0000
1140# define TV_XSIZE_SHIFT 16
1141/**
1142 * Vertical size of the display window, measured in pixels.
1143 *
1144 * Must be even for interlaced modes.
1145 */
1146# define TV_YSIZE_MASK 0x00000fff
1147# define TV_YSIZE_SHIFT 0
1148
1149#define TV_FILTER_CTL_1 0x68080
1150/**
1151 * Enables automatic scaling calculation.
1152 *
1153 * If set, the rest of the registers are ignored, and the calculated values can
1154 * be read back from the register.
1155 */
1156# define TV_AUTO_SCALE (1 << 31)
1157/**
1158 * Disables the vertical filter.
1159 *
1160 * This is required on modes more than 1024 pixels wide */
1161# define TV_V_FILTER_BYPASS (1 << 29)
1162/** Enables adaptive vertical filtering */
1163# define TV_VADAPT (1 << 28)
1164# define TV_VADAPT_MODE_MASK (3 << 26)
1165/** Selects the least adaptive vertical filtering mode */
1166# define TV_VADAPT_MODE_LEAST (0 << 26)
1167/** Selects the moderately adaptive vertical filtering mode */
1168# define TV_VADAPT_MODE_MODERATE (1 << 26)
1169/** Selects the most adaptive vertical filtering mode */
1170# define TV_VADAPT_MODE_MOST (3 << 26)
1171/**
1172 * Sets the horizontal scaling factor.
1173 *
1174 * This should be the fractional part of the horizontal scaling factor divided
1175 * by the oversampling rate. TV_HSCALE should be less than 1, and set to:
1176 *
1177 * (src width - 1) / ((oversample * dest width) - 1)
1178 */
1179# define TV_HSCALE_FRAC_MASK 0x00003fff
1180# define TV_HSCALE_FRAC_SHIFT 0
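/*
 * Editor's worked example (hypothetical numbers): a 640-pixel-wide
 * source on a 4x-oversampled 640-pixel destination gives
 *	(640 - 1) / ((4 * 640) - 1) = 639 / 2559 ~= 0.2497
 * which quantizes to roughly 0x0ffb in the 14-bit fraction field.
 */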
1181
1182#define TV_FILTER_CTL_2 0x68084
1183/**
1184 * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
1185 *
1186 * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
1187 */
1188# define TV_VSCALE_INT_MASK 0x00038000
1189# define TV_VSCALE_INT_SHIFT 15
1190/**
1191 * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
1192 *
1193 * \sa TV_VSCALE_INT_MASK
1194 */
1195# define TV_VSCALE_FRAC_MASK 0x00007fff
1196# define TV_VSCALE_FRAC_SHIFT 0
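/*
 * Editor's worked example (hypothetical numbers): a 480-line source on
 * an interlaced (two-field) 480-line mode gives
 *	(480 - 1) / ((2 * 480) - 1) = 479 / 959 ~= 0.4995
 * split across the 3.15 fixed-point integer/fraction fields above.
 */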
1197
1198#define TV_FILTER_CTL_3 0x68088
1199/**
1200 * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
1201 *
1202 * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
1203 *
1204 * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
1205 */
1206# define TV_VSCALE_IP_INT_MASK 0x00038000
1207# define TV_VSCALE_IP_INT_SHIFT 15
1208/**
1209 * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
1210 *
1211 * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
1212 *
1213 * \sa TV_VSCALE_IP_INT_MASK
1214 */
1215# define TV_VSCALE_IP_FRAC_MASK 0x00007fff
1216# define TV_VSCALE_IP_FRAC_SHIFT 0
1217
1218#define TV_CC_CONTROL 0x68090
1219# define TV_CC_ENABLE (1 << 31)
1220/**
1221 * Specifies which field to send the CC data in.
1222 *
1223 * CC data is usually sent in field 0.
1224 */
1225# define TV_CC_FID_MASK (1 << 27)
1226# define TV_CC_FID_SHIFT 27
1227/** Sets the horizontal position of the CC data. Usually 135. */
1228# define TV_CC_HOFF_MASK 0x03ff0000
1229# define TV_CC_HOFF_SHIFT 16
1230/** Sets the vertical position of the CC data. Usually 21 */
1231# define TV_CC_LINE_MASK 0x0000003f
1232# define TV_CC_LINE_SHIFT 0
1233
1234#define TV_CC_DATA 0x68094
1235# define TV_CC_RDY (1 << 31)
1236/** Second word of CC data to be transmitted. */
1237# define TV_CC_DATA_2_MASK 0x007f0000
1238# define TV_CC_DATA_2_SHIFT 16
1239/** First word of CC data to be transmitted. */
1240# define TV_CC_DATA_1_MASK 0x0000007f
1241# define TV_CC_DATA_1_SHIFT 0
1242
1243#define TV_H_LUMA_0 0x68100
1244#define TV_H_LUMA_59 0x681ec
1245#define TV_H_CHROMA_0 0x68200
1246#define TV_H_CHROMA_59 0x682ec
1247#define TV_V_LUMA_0 0x68300
1248#define TV_V_LUMA_42 0x683a8
1249#define TV_V_CHROMA_0 0x68400
1250#define TV_V_CHROMA_42 0x684a8
1251
1252/* Display & cursor control */
1253
1254/* Pipe A */
1255#define PIPEADSL 0x70000
1256#define PIPEACONF 0x70008
1257#define PIPEACONF_ENABLE (1<<31)
1258#define PIPEACONF_DISABLE 0
1259#define PIPEACONF_DOUBLE_WIDE (1<<30)
1260#define I965_PIPECONF_ACTIVE (1<<30)
1261#define PIPEACONF_SINGLE_WIDE 0
1262#define PIPEACONF_PIPE_UNLOCKED 0
1263#define PIPEACONF_PIPE_LOCKED (1<<25)
1264#define PIPEACONF_PALETTE 0
1265#define PIPEACONF_GAMMA (1<<24)
1266#define PIPECONF_FORCE_BORDER (1<<25)
1267#define PIPECONF_PROGRESSIVE (0 << 21)
1268#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
1269#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
1270#define PIPEASTAT 0x70024
1271#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
1272#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
1273#define PIPE_CRC_DONE_ENABLE (1UL<<28)
1274#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
1275#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
1276#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
1277#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
1278#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
1279#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
1280#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
1281#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
1282#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
1283#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
1284#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
1285#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
1286#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
1287#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
1288#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
1289#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
1290#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
1291#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
1292#define PIPE_DPST_EVENT_STATUS (1UL<<7)
1293#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
1294#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
1295#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
1296#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
1297#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
1298#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
1299#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
1300
1301#define DSPARB 0x70030
1302#define DSPARB_CSTART_MASK (0x7f << 7)
1303#define DSPARB_CSTART_SHIFT 7
1304#define DSPARB_BSTART_MASK (0x7f)
1305#define DSPARB_BSTART_SHIFT 0
1306/*
1307 * The two pipe frame counter registers are not synchronized, so
1308 * reading a stable value is somewhat tricky. The following code
1309 * should work:
1310 *
1311 * do {
1312 * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
1313 * PIPE_FRAME_HIGH_SHIFT);
1314 * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
1315 * PIPE_FRAME_LOW_SHIFT);
1316 * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
1317 * PIPE_FRAME_HIGH_SHIFT);
1318 * } while (high1 != high2);
1319 * frame = (high1 << 8) | low1;
1320 */
1321#define PIPEAFRAMEHIGH 0x70040
1322#define PIPE_FRAME_HIGH_MASK 0x0000ffff
1323#define PIPE_FRAME_HIGH_SHIFT 0
1324#define PIPEAFRAMEPIXEL 0x70044
1325#define PIPE_FRAME_LOW_MASK 0xff000000
1326#define PIPE_FRAME_LOW_SHIFT 24
1327#define PIPE_PIXEL_MASK 0x00ffffff
1328#define PIPE_PIXEL_SHIFT 0
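/*
 * Editor's sketch of the same read loop written against these masks and
 * a driver register accessor (I915_READ is assumed to come from
 * i915_drv.h):
 *
 *	u32 high1, high2, low, frame;
 *	do {
 *		high1 = (I915_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK)
 *				>> PIPE_FRAME_HIGH_SHIFT;
 *		low = (I915_READ(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK)
 *				>> PIPE_FRAME_LOW_SHIFT;
 *		high2 = (I915_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK)
 *				>> PIPE_FRAME_HIGH_SHIFT;
 *	} while (high1 != high2);
 *	frame = (high1 << 8) | low;
 */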
1329
1330/* Cursor A & B regs */
1331#define CURACNTR 0x70080
1332#define CURSOR_MODE_DISABLE 0x00
1333#define CURSOR_MODE_64_32B_AX 0x07
1334#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
1335#define MCURSOR_GAMMA_ENABLE (1 << 26)
1336#define CURABASE 0x70084
1337#define CURAPOS 0x70088
1338#define CURSOR_POS_MASK 0x007FF
1339#define CURSOR_POS_SIGN 0x8000
1340#define CURSOR_X_SHIFT 0
1341#define CURSOR_Y_SHIFT 16
1342#define CURBCNTR 0x700c0
1343#define CURBBASE 0x700c4
1344#define CURBPOS 0x700c8
1345
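The cursor position registers use a sign/magnitude encoding per axis. A hedged sketch of packing a possibly negative (x, y) with the fields above; the helper itself is hypothetical:

	static u32 cursor_pos_encode(int x, int y)
	{
		u32 pos = 0;

		if (x < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
			x = -x;
		}
		if (y < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
			y = -y;
		}
		pos |= (x & CURSOR_POS_MASK) << CURSOR_X_SHIFT;
		pos |= (y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT;
		return pos; /* written to CURAPOS or CURBPOS */
	}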
1346/* Display A control */
1347#define DSPACNTR 0x70180
1348#define DISPLAY_PLANE_ENABLE (1<<31)
1349#define DISPLAY_PLANE_DISABLE 0
1350#define DISPPLANE_GAMMA_ENABLE (1<<30)
1351#define DISPPLANE_GAMMA_DISABLE 0
1352#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
1353#define DISPPLANE_8BPP (0x2<<26)
1354#define DISPPLANE_15_16BPP (0x4<<26)
1355#define DISPPLANE_16BPP (0x5<<26)
1356#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
1357#define DISPPLANE_32BPP (0x7<<26)
1358#define DISPPLANE_STEREO_ENABLE (1<<25)
1359#define DISPPLANE_STEREO_DISABLE 0
1360#define DISPPLANE_SEL_PIPE_MASK (1<<24)
1361#define DISPPLANE_SEL_PIPE_A 0
1362#define DISPPLANE_SEL_PIPE_B (1<<24)
1363#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
1364#define DISPPLANE_SRC_KEY_DISABLE 0
1365#define DISPPLANE_LINE_DOUBLE (1<<20)
1366#define DISPPLANE_NO_LINE_DOUBLE 0
1367#define DISPPLANE_STEREO_POLARITY_FIRST 0
1368#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
1369#define DSPAADDR 0x70184
1370#define DSPASTRIDE 0x70188
1371#define DSPAPOS 0x7018C /* reserved */
1372#define DSPASIZE 0x70190
1373#define DSPASURF 0x7019C /* 965+ only */
1374#define DSPATILEOFF 0x701A4 /* 965+ only */
1375
1376/* VBIOS flags */
1377#define SWF00 0x71410
1378#define SWF01 0x71414
1379#define SWF02 0x71418
1380#define SWF03 0x7141c
1381#define SWF04 0x71420
1382#define SWF05 0x71424
1383#define SWF06 0x71428
1384#define SWF10 0x70410
1385#define SWF11 0x70414
1386#define SWF14 0x71420
1387#define SWF30 0x72414
1388#define SWF31 0x72418
1389#define SWF32 0x7241c
1390
1391/* Pipe B */
1392#define PIPEBDSL 0x71000
1393#define PIPEBCONF 0x71008
1394#define PIPEBSTAT 0x71024
1395#define PIPEBFRAMEHIGH 0x71040
1396#define PIPEBFRAMEPIXEL 0x71044
1397
1398/* Display B control */
1399#define DSPBCNTR 0x71180
1400#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
1401#define DISPPLANE_ALPHA_TRANS_DISABLE 0
1402#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
1403#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
1404#define DSPBADDR 0x71184
1405#define DSPBSTRIDE 0x71188
1406#define DSPBPOS 0x7118C
1407#define DSPBSIZE 0x71190
1408#define DSPBSURF 0x7119C
1409#define DSPBTILEOFF 0x711A4
1410
1411/* VBIOS regs */
1412#define VGACNTRL 0x71400
1413# define VGA_DISP_DISABLE (1 << 31)
1414# define VGA_2X_MODE (1 << 30)
1415# define VGA_PIPE_B_SELECT (1 << 29)
1416
1417#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
new file mode 100644
index 000000000000..603fe742ccd4
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -0,0 +1,509 @@
1/*
2 *
3 * Copyright 2008 (c) Intel Corporation
4 * Jesse Barnes <jbarnes@virtuousgeek.org>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "i915_drm.h"
30#include "i915_drv.h"
31
32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
33{
34 struct drm_i915_private *dev_priv = dev->dev_private;
35
36 if (pipe == PIPE_A)
37 return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
38 else
39 return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
40}
41
42static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
43{
44 struct drm_i915_private *dev_priv = dev->dev_private;
45 unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
46 u32 *array;
47 int i;
48
49 if (!i915_pipe_enabled(dev, pipe))
50 return;
51
52 if (pipe == PIPE_A)
53 array = dev_priv->save_palette_a;
54 else
55 array = dev_priv->save_palette_b;
56
57	for (i = 0; i < 256; i++)
58 array[i] = I915_READ(reg + (i << 2));
59}
60
61static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
62{
63 struct drm_i915_private *dev_priv = dev->dev_private;
64 unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
65 u32 *array;
66 int i;
67
68 if (!i915_pipe_enabled(dev, pipe))
69 return;
70
71 if (pipe == PIPE_A)
72 array = dev_priv->save_palette_a;
73 else
74 array = dev_priv->save_palette_b;
75
76	for (i = 0; i < 256; i++)
77 I915_WRITE(reg + (i << 2), array[i]);
78}
79
80static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
81{
82 struct drm_i915_private *dev_priv = dev->dev_private;
83
84 I915_WRITE8(index_port, reg);
85 return I915_READ8(data_port);
86}
87
88static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
89{
90 struct drm_i915_private *dev_priv = dev->dev_private;
91
92 I915_READ8(st01);
93 I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
94 return I915_READ8(VGA_AR_DATA_READ);
95}
96
97static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
98{
99 struct drm_i915_private *dev_priv = dev->dev_private;
100
101 I915_READ8(st01);
102 I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
103 I915_WRITE8(VGA_AR_DATA_WRITE, val);
104}
105
106static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
107{
108 struct drm_i915_private *dev_priv = dev->dev_private;
109
110 I915_WRITE8(index_port, reg);
111 I915_WRITE8(data_port, val);
112}
113
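These four helpers wrap the classic VGA index/data port protocol: write the register index to the index port, then move the value through the data port; the attribute controller additionally needs an ST01 read first to reset its index/data flip-flop. For example, with cr_index/cr_data as chosen by the MSR check in i915_save_vga below, clearing the CRTC write-protect bit looks like this (illustrative fragment only):

	u8 cr11 = i915_read_indexed(dev, cr_index, cr_data, 0x11);

	/* CR11 bit 7 write-protects CR0-CR7; clear it before saving */
	i915_write_indexed(dev, cr_index, cr_data, 0x11, cr11 & ~0x80);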
114static void i915_save_vga(struct drm_device *dev)
115{
116 struct drm_i915_private *dev_priv = dev->dev_private;
117 int i;
118 u16 cr_index, cr_data, st01;
119
120 /* VGA color palette registers */
121 dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
122	/* DACRX automatically increments during read */
123 I915_WRITE8(VGA_DACRX, 0);
124 /* Read 3 bytes of color data from each index */
125 for (i = 0; i < 256 * 3; i++)
126 dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
127
128 /* MSR bits */
129 dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
130 if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
131 cr_index = VGA_CR_INDEX_CGA;
132 cr_data = VGA_CR_DATA_CGA;
133 st01 = VGA_ST01_CGA;
134 } else {
135 cr_index = VGA_CR_INDEX_MDA;
136 cr_data = VGA_CR_DATA_MDA;
137 st01 = VGA_ST01_MDA;
138 }
139
140 /* CRT controller regs */
141 i915_write_indexed(dev, cr_index, cr_data, 0x11,
142 i915_read_indexed(dev, cr_index, cr_data, 0x11) &
143 (~0x80));
144 for (i = 0; i <= 0x24; i++)
145 dev_priv->saveCR[i] =
146 i915_read_indexed(dev, cr_index, cr_data, i);
147 /* Make sure we don't turn off CR group 0 writes */
148 dev_priv->saveCR[0x11] &= ~0x80;
149
150 /* Attribute controller registers */
151 I915_READ8(st01);
152 dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
153 for (i = 0; i <= 0x14; i++)
154 dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
155 I915_READ8(st01);
156 I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
157 I915_READ8(st01);
158
159 /* Graphics controller registers */
160 for (i = 0; i < 9; i++)
161 dev_priv->saveGR[i] =
162 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
163
164 dev_priv->saveGR[0x10] =
165 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
166 dev_priv->saveGR[0x11] =
167 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
168 dev_priv->saveGR[0x18] =
169 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
170
171 /* Sequencer registers */
172 for (i = 0; i < 8; i++)
173 dev_priv->saveSR[i] =
174 i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
175}
176
177static void i915_restore_vga(struct drm_device *dev)
178{
179 struct drm_i915_private *dev_priv = dev->dev_private;
180 int i;
181 u16 cr_index, cr_data, st01;
182
183 /* MSR bits */
184 I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
185 if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
186 cr_index = VGA_CR_INDEX_CGA;
187 cr_data = VGA_CR_DATA_CGA;
188 st01 = VGA_ST01_CGA;
189 } else {
190 cr_index = VGA_CR_INDEX_MDA;
191 cr_data = VGA_CR_DATA_MDA;
192 st01 = VGA_ST01_MDA;
193 }
194
195 /* Sequencer registers, don't write SR07 */
196 for (i = 0; i < 7; i++)
197 i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
198 dev_priv->saveSR[i]);
199
200 /* CRT controller regs */
201 /* Enable CR group 0 writes */
202 i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
203 for (i = 0; i <= 0x24; i++)
204 i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
205
206 /* Graphics controller regs */
207 for (i = 0; i < 9; i++)
208 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
209 dev_priv->saveGR[i]);
210
211 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
212 dev_priv->saveGR[0x10]);
213 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
214 dev_priv->saveGR[0x11]);
215 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
216 dev_priv->saveGR[0x18]);
217
218 /* Attribute controller registers */
219 I915_READ8(st01); /* switch back to index mode */
220 for (i = 0; i <= 0x14; i++)
221 i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
222 I915_READ8(st01); /* switch back to index mode */
223 I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
224 I915_READ8(st01);
225
226 /* VGA color palette registers */
227 I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
228	/* DACWX automatically increments during write */
229	I915_WRITE8(VGA_DACWX, 0);
230	/* Write 3 bytes of color data to each index */
231 for (i = 0; i < 256 * 3; i++)
232 I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
233
234}
235
236int i915_save_state(struct drm_device *dev)
237{
238 struct drm_i915_private *dev_priv = dev->dev_private;
239 int i;
240
241 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
242
243 /* Display arbitration control */
244 dev_priv->saveDSPARB = I915_READ(DSPARB);
245
246 /* Pipe & plane A info */
247 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
248 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
249 dev_priv->saveFPA0 = I915_READ(FPA0);
250 dev_priv->saveFPA1 = I915_READ(FPA1);
251 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
252 if (IS_I965G(dev))
253 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
254 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
255 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
256 dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
257 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
258 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
259 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
260 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
261
262 dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
263 dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
264 dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
265 dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
266 dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
267 if (IS_I965G(dev)) {
268 dev_priv->saveDSPASURF = I915_READ(DSPASURF);
269 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
270 }
271 i915_save_palette(dev, PIPE_A);
272 dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
273
274 /* Pipe & plane B info */
275 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
276 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
277 dev_priv->saveFPB0 = I915_READ(FPB0);
278 dev_priv->saveFPB1 = I915_READ(FPB1);
279 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
280 if (IS_I965G(dev))
281 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
282 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
283 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
284 dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
285 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
286 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
287 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
288	dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); /* pipe B, not A: the restore path writes BCLRPAT_B */
289
290 dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
291 dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
292 dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
293 dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
294 dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
295	if (IS_I965G(dev)) { /* keep symmetric with the restore path, which checks IS_I965G */
296 dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
297 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
298 }
299 i915_save_palette(dev, PIPE_B);
300 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
301
302 /* CRT state */
303 dev_priv->saveADPA = I915_READ(ADPA);
304
305 /* LVDS state */
306 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
307 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
308 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
309 if (IS_I965G(dev))
310 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
311 if (IS_MOBILE(dev) && !IS_I830(dev))
312 dev_priv->saveLVDS = I915_READ(LVDS);
313 if (!IS_I830(dev) && !IS_845G(dev))
314 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
315 dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
316 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
317 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
318
319 /* FIXME: save TV & SDVO state */
320
321 /* FBC state */
322 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
323 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
324 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
325 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
326
327 /* Interrupt state */
328 dev_priv->saveIIR = I915_READ(IIR);
329 dev_priv->saveIER = I915_READ(IER);
330 dev_priv->saveIMR = I915_READ(IMR);
331
332 /* VGA state */
333 dev_priv->saveVGA0 = I915_READ(VGA0);
334 dev_priv->saveVGA1 = I915_READ(VGA1);
335 dev_priv->saveVGA_PD = I915_READ(VGA_PD);
336 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
337
338 /* Clock gating state */
339 dev_priv->saveD_STATE = I915_READ(D_STATE);
340 dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
341
342 /* Cache mode state */
343 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
344
345 /* Memory Arbitration state */
346 dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
347
348 /* Scratch space */
349 for (i = 0; i < 16; i++) {
350 dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
351 dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
352 }
353 for (i = 0; i < 3; i++)
354 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
355
356 i915_save_vga(dev);
357
358 return 0;
359}
360
361int i915_restore_state(struct drm_device *dev)
362{
363 struct drm_i915_private *dev_priv = dev->dev_private;
364 int i;
365
366 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
367
368 I915_WRITE(DSPARB, dev_priv->saveDSPARB);
369
370 /* Pipe & plane A info */
371 /* Prime the clock */
372 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
373 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
374 ~DPLL_VCO_ENABLE);
375 DRM_UDELAY(150);
376 }
377 I915_WRITE(FPA0, dev_priv->saveFPA0);
378 I915_WRITE(FPA1, dev_priv->saveFPA1);
379 /* Actually enable it */
380 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
381 DRM_UDELAY(150);
382 if (IS_I965G(dev))
383 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
384 DRM_UDELAY(150);
385
386 /* Restore mode */
387 I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
388 I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
389 I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
390 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
391 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
392 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
393 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
394
395 /* Restore plane info */
396 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
397 I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
398 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
399 I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
400 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
401 if (IS_I965G(dev)) {
402 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
403 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
404 }
405
406 I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
407
408 i915_restore_palette(dev, PIPE_A);
409 /* Enable the plane */
410 I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
411 I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
412
413 /* Pipe & plane B info */
414 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
415 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
416 ~DPLL_VCO_ENABLE);
417 DRM_UDELAY(150);
418 }
419 I915_WRITE(FPB0, dev_priv->saveFPB0);
420 I915_WRITE(FPB1, dev_priv->saveFPB1);
421 /* Actually enable it */
422 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
423 DRM_UDELAY(150);
424 if (IS_I965G(dev))
425 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
426 DRM_UDELAY(150);
427
428 /* Restore mode */
429 I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
430 I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
431 I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
432 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
433 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
434 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
435 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
436
437 /* Restore plane info */
438 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
439 I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
440 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
441 I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
442 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
443 if (IS_I965G(dev)) {
444 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
445 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
446 }
447
448 I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
449
450 i915_restore_palette(dev, PIPE_B);
451 /* Enable the plane */
452 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
453 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
454
455 /* CRT state */
456 I915_WRITE(ADPA, dev_priv->saveADPA);
457
458 /* LVDS state */
459 if (IS_I965G(dev))
460 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
461 if (IS_MOBILE(dev) && !IS_I830(dev))
462 I915_WRITE(LVDS, dev_priv->saveLVDS);
463 if (!IS_I830(dev) && !IS_845G(dev))
464 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
465
466 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
467 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
468 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
469 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
470 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
471 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
472
473 /* FIXME: restore TV & SDVO state */
474
475 /* FBC info */
476 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
477 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
478 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
479 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
480
481 /* VGA state */
482 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
483 I915_WRITE(VGA0, dev_priv->saveVGA0);
484 I915_WRITE(VGA1, dev_priv->saveVGA1);
485 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
486 DRM_UDELAY(150);
487
488 /* Clock gating state */
489	I915_WRITE(D_STATE, dev_priv->saveD_STATE);
490	I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS);
491
492	/* Cache mode state */
493	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
494
495	/* Memory arbitration state */
496	I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
497
498 for (i = 0; i < 16; i++) {
499 I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
500	I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); /* index must mirror the save loop; [i+7] overruns the 16-entry array */
501 }
502 for (i = 0; i < 3; i++)
503 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
504
505 i915_restore_vga(dev);
506
507 return 0;
508}
509
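A sketch of how a PM hook might drive this save/restore pair; the hook names and the PCI config-space calls are assumptions for illustration, not code from this patch:

	static int example_suspend(struct drm_device *dev, pm_message_t state)
	{
		if (!dev || !dev->dev_private)
			return -ENODEV;

		pci_save_state(dev->pdev);	/* config space first */
		i915_save_state(dev);		/* then the MMIO/VGA state above */
		return 0;
	}

	static int example_resume(struct drm_device *dev)
	{
		pci_restore_state(dev->pdev);
		return i915_restore_state(dev);
	}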
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 5572939fc7d1..97ee566ef749 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -45,15 +45,16 @@ static struct pci_device_id pciidlist[] = {
45static struct drm_driver driver = { 45static struct drm_driver driver = {
46 .driver_features = 46 .driver_features =
47 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | 47 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
48 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 48 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
49 DRIVER_IRQ_VBL,
50 .dev_priv_size = sizeof(drm_mga_buf_priv_t), 49 .dev_priv_size = sizeof(drm_mga_buf_priv_t),
51 .load = mga_driver_load, 50 .load = mga_driver_load,
52 .unload = mga_driver_unload, 51 .unload = mga_driver_unload,
53 .lastclose = mga_driver_lastclose, 52 .lastclose = mga_driver_lastclose,
54 .dma_quiescent = mga_driver_dma_quiescent, 53 .dma_quiescent = mga_driver_dma_quiescent,
55 .device_is_agp = mga_driver_device_is_agp, 54 .device_is_agp = mga_driver_device_is_agp,
56 .vblank_wait = mga_driver_vblank_wait, 55 .get_vblank_counter = mga_get_vblank_counter,
56 .enable_vblank = mga_enable_vblank,
57 .disable_vblank = mga_disable_vblank,
57 .irq_preinstall = mga_driver_irq_preinstall, 58 .irq_preinstall = mga_driver_irq_preinstall,
58 .irq_postinstall = mga_driver_irq_postinstall, 59 .irq_postinstall = mga_driver_irq_postinstall,
59 .irq_uninstall = mga_driver_irq_uninstall, 60 .irq_uninstall = mga_driver_irq_uninstall,
@@ -64,20 +65,20 @@ static struct drm_driver driver = {
64 .ioctls = mga_ioctls, 65 .ioctls = mga_ioctls,
65 .dma_ioctl = mga_dma_buffers, 66 .dma_ioctl = mga_dma_buffers,
66 .fops = { 67 .fops = {
67 .owner = THIS_MODULE, 68 .owner = THIS_MODULE,
68 .open = drm_open, 69 .open = drm_open,
69 .release = drm_release, 70 .release = drm_release,
70 .ioctl = drm_ioctl, 71 .ioctl = drm_ioctl,
71 .mmap = drm_mmap, 72 .mmap = drm_mmap,
72 .poll = drm_poll, 73 .poll = drm_poll,
73 .fasync = drm_fasync, 74 .fasync = drm_fasync,
74#ifdef CONFIG_COMPAT 75#ifdef CONFIG_COMPAT
75 .compat_ioctl = mga_compat_ioctl, 76 .compat_ioctl = mga_compat_ioctl,
76#endif 77#endif
77 }, 78 },
78 .pci_driver = { 79 .pci_driver = {
79 .name = DRIVER_NAME, 80 .name = DRIVER_NAME,
80 .id_table = pciidlist, 81 .id_table = pciidlist,
81 }, 82 },
82 83
83 .name = DRIVER_NAME, 84 .name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index f6ebd24bd587..88257c276eb9 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -120,6 +120,7 @@ typedef struct drm_mga_private {
120 u32 clear_cmd; 120 u32 clear_cmd;
121 u32 maccess; 121 u32 maccess;
122 122
123 atomic_t vbl_received; /**< Number of vblanks received. */
123 wait_queue_head_t fence_queue; 124 wait_queue_head_t fence_queue;
124 atomic_t last_fence_retired; 125 atomic_t last_fence_retired;
125 u32 next_fence_to_post; 126 u32 next_fence_to_post;
@@ -181,11 +182,14 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
181extern int mga_warp_init(drm_mga_private_t * dev_priv); 182extern int mga_warp_init(drm_mga_private_t * dev_priv);
182 183
183 /* mga_irq.c */ 184 /* mga_irq.c */
185extern int mga_enable_vblank(struct drm_device *dev, int crtc);
186extern void mga_disable_vblank(struct drm_device *dev, int crtc);
187extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
184extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); 188extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
185extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 189extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
186extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); 190extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
187extern void mga_driver_irq_preinstall(struct drm_device * dev); 191extern void mga_driver_irq_preinstall(struct drm_device * dev);
188extern void mga_driver_irq_postinstall(struct drm_device * dev); 192extern int mga_driver_irq_postinstall(struct drm_device *dev);
189extern void mga_driver_irq_uninstall(struct drm_device * dev); 193extern void mga_driver_irq_uninstall(struct drm_device * dev);
190extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, 194extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
191 unsigned long arg); 195 unsigned long arg);
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 9302cb8f0f83..bab42f41188b 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -1,5 +1,6 @@
1/* mga_irq.c -- IRQ handling for mga -*- linux-c -*- 1/* mga_irq.c -- IRQ handling for mga -*- linux-c -*-
2 * 2 */
3/*
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 4 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 * 5 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the 6 * The Weather Channel (TM) funded Tungsten Graphics to develop the
@@ -35,6 +36,18 @@
35#include "mga_drm.h" 36#include "mga_drm.h"
36#include "mga_drv.h" 37#include "mga_drv.h"
37 38
39u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40{
41 const drm_mga_private_t *const dev_priv =
42 (drm_mga_private_t *) dev->dev_private;
43
44 if (crtc != 0)
45 return 0;
46
47 return atomic_read(&dev_priv->vbl_received);
48}
49
50
38irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) 51irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39{ 52{
40 struct drm_device *dev = (struct drm_device *) arg; 53 struct drm_device *dev = (struct drm_device *) arg;
@@ -47,9 +60,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
47 /* VBLANK interrupt */ 60 /* VBLANK interrupt */
48 if (status & MGA_VLINEPEN) { 61 if (status & MGA_VLINEPEN) {
49 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); 62 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
50 atomic_inc(&dev->vbl_received); 63 atomic_inc(&dev_priv->vbl_received);
51 DRM_WAKEUP(&dev->vbl_queue); 64 drm_handle_vblank(dev, 0);
52 drm_vbl_send_signals(dev);
53 handled = 1; 65 handled = 1;
54 } 66 }
55 67
@@ -58,6 +70,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
58 const u32 prim_start = MGA_READ(MGA_PRIMADDRESS); 70 const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
59 const u32 prim_end = MGA_READ(MGA_PRIMEND); 71 const u32 prim_end = MGA_READ(MGA_PRIMEND);
60 72
73
61 MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR); 74 MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
62 75
63 /* In addition to clearing the interrupt-pending bit, we 76 /* In addition to clearing the interrupt-pending bit, we
@@ -72,28 +85,39 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
72 handled = 1; 85 handled = 1;
73 } 86 }
74 87
75 if (handled) { 88 if (handled)
76 return IRQ_HANDLED; 89 return IRQ_HANDLED;
77 }
78 return IRQ_NONE; 90 return IRQ_NONE;
79} 91}
80 92
81int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 93int mga_enable_vblank(struct drm_device *dev, int crtc)
82{ 94{
83 unsigned int cur_vblank; 95 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
84 int ret = 0;
85 96
86 /* Assume that the user has missed the current sequence number 97 if (crtc != 0) {
87 * by about a day rather than she wants to wait for years 98 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
88 * using vertical blanks... 99 crtc);
89 */ 100 return 0;
90 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 101 }
91 (((cur_vblank = atomic_read(&dev->vbl_received))
92 - *sequence) <= (1 << 23)));
93 102
94 *sequence = cur_vblank; 103 MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
104 return 0;
105}
95 106
96 return ret; 107
108void mga_disable_vblank(struct drm_device *dev, int crtc)
109{
110 if (crtc != 0) {
111 DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
112 crtc);
113 }
114
115 /* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have
116 * a nice hardware counter that tracks the number of refreshes when
117 * the interrupt is disabled, and the kernel doesn't know the refresh
118 * rate to calculate an estimate.
119 */
120 /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
97} 121}
98 122
99int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) 123int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
@@ -125,14 +149,22 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
125 MGA_WRITE(MGA_ICLEAR, ~0); 149 MGA_WRITE(MGA_ICLEAR, ~0);
126} 150}
127 151
128void mga_driver_irq_postinstall(struct drm_device * dev) 152int mga_driver_irq_postinstall(struct drm_device *dev)
129{ 153{
130 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 154 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
155 int ret;
156
157 ret = drm_vblank_init(dev, 1);
158 if (ret)
159 return ret;
131 160
132 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); 161 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
133 162
134 /* Turn on vertical blank interrupt and soft trap interrupt. */ 163 /* Turn on soft trap interrupt. Vertical blank interrupts are enabled
135 MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); 164 * in mga_enable_vblank.
165 */
166 MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
167 return 0;
136} 168}
137 169
138void mga_driver_irq_uninstall(struct drm_device * dev) 170void mga_driver_irq_uninstall(struct drm_device * dev)
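The same three-hook conversion repeats for r128 and radeon below. As a rough guide to the flow these hooks plug into (hedged; the core actually defers the disable, so read this as the rework's intent rather than its exact call graph):

	/*
	 * drm_vblank_get(dev, crtc)                user starts waiting
	 *    -> driver->enable_vblank(dev, crtc)   on the 0 -> 1 refcount edge
	 * driver IRQ handler, once per vblank
	 *    -> drm_handle_vblank(dev, crtc)       bumps count, wakes waiters
	 * drm_vblank_put(dev, crtc)
	 *    -> driver->disable_vblank(dev, crtc)  after the last waiter drops
	 */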
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index d3f8aade07b3..b710fab21cb3 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1022,7 +1022,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
1022 1022
1023 switch (param->param) { 1023 switch (param->param) {
1024 case MGA_PARAM_IRQ_NR: 1024 case MGA_PARAM_IRQ_NR:
1025 value = dev->irq; 1025 value = drm_dev_to_irq(dev);
1026 break; 1026 break;
1027 case MGA_PARAM_CARD_TYPE: 1027 case MGA_PARAM_CARD_TYPE:
1028 value = dev_priv->chipset; 1028 value = dev_priv->chipset;
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 6108e7587e12..3265d53ba91f 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -43,12 +43,13 @@ static struct pci_device_id pciidlist[] = {
43static struct drm_driver driver = { 43static struct drm_driver driver = {
44 .driver_features = 44 .driver_features =
45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
46 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 46 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
47 DRIVER_IRQ_VBL,
48 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 47 .dev_priv_size = sizeof(drm_r128_buf_priv_t),
49 .preclose = r128_driver_preclose, 48 .preclose = r128_driver_preclose,
50 .lastclose = r128_driver_lastclose, 49 .lastclose = r128_driver_lastclose,
51 .vblank_wait = r128_driver_vblank_wait, 50 .get_vblank_counter = r128_get_vblank_counter,
51 .enable_vblank = r128_enable_vblank,
52 .disable_vblank = r128_disable_vblank,
52 .irq_preinstall = r128_driver_irq_preinstall, 53 .irq_preinstall = r128_driver_irq_preinstall,
53 .irq_postinstall = r128_driver_irq_postinstall, 54 .irq_postinstall = r128_driver_irq_postinstall,
54 .irq_uninstall = r128_driver_irq_uninstall, 55 .irq_uninstall = r128_driver_irq_uninstall,
@@ -59,21 +60,20 @@ static struct drm_driver driver = {
59 .ioctls = r128_ioctls, 60 .ioctls = r128_ioctls,
60 .dma_ioctl = r128_cce_buffers, 61 .dma_ioctl = r128_cce_buffers,
61 .fops = { 62 .fops = {
62 .owner = THIS_MODULE, 63 .owner = THIS_MODULE,
63 .open = drm_open, 64 .open = drm_open,
64 .release = drm_release, 65 .release = drm_release,
65 .ioctl = drm_ioctl, 66 .ioctl = drm_ioctl,
66 .mmap = drm_mmap, 67 .mmap = drm_mmap,
67 .poll = drm_poll, 68 .poll = drm_poll,
68 .fasync = drm_fasync, 69 .fasync = drm_fasync,
69#ifdef CONFIG_COMPAT 70#ifdef CONFIG_COMPAT
70 .compat_ioctl = r128_compat_ioctl, 71 .compat_ioctl = r128_compat_ioctl,
71#endif 72#endif
72 }, 73 },
73
74 .pci_driver = { 74 .pci_driver = {
75 .name = DRIVER_NAME, 75 .name = DRIVER_NAME,
76 .id_table = pciidlist, 76 .id_table = pciidlist,
77 }, 77 },
78 78
79 .name = DRIVER_NAME, 79 .name = DRIVER_NAME,
@@ -87,6 +87,7 @@ static struct drm_driver driver = {
87static int __init r128_init(void) 87static int __init r128_init(void)
88{ 88{
89 driver.num_ioctls = r128_max_ioctl; 89 driver.num_ioctls = r128_max_ioctl;
90
90 return drm_init(&driver); 91 return drm_init(&driver);
91} 92}
92 93
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 011105e51ac6..5898b274279d 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -29,7 +29,7 @@
29 * Rickard E. (Rik) Faith <faith@valinux.com> 29 * Rickard E. (Rik) Faith <faith@valinux.com>
30 * Kevin E. Martin <martin@valinux.com> 30 * Kevin E. Martin <martin@valinux.com>
31 * Gareth Hughes <gareth@valinux.com> 31 * Gareth Hughes <gareth@valinux.com>
32 * Michel Dänzer <daenzerm@student.ethz.ch> 32 * Michel Dänzer <daenzerm@student.ethz.ch>
33 */ 33 */
34 34
35#ifndef __R128_DRV_H__ 35#ifndef __R128_DRV_H__
@@ -97,6 +97,8 @@ typedef struct drm_r128_private {
97 u32 crtc_offset; 97 u32 crtc_offset;
98 u32 crtc_offset_cntl; 98 u32 crtc_offset_cntl;
99 99
100 atomic_t vbl_received;
101
100 u32 color_fmt; 102 u32 color_fmt;
101 unsigned int front_offset; 103 unsigned int front_offset;
102 unsigned int front_pitch; 104 unsigned int front_pitch;
@@ -149,11 +151,12 @@ extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
149extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); 151extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
150extern int r128_do_cleanup_cce(struct drm_device * dev); 152extern int r128_do_cleanup_cce(struct drm_device * dev);
151 153
152extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 154extern int r128_enable_vblank(struct drm_device *dev, int crtc);
153 155extern void r128_disable_vblank(struct drm_device *dev, int crtc);
156extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
154extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); 157extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
155extern void r128_driver_irq_preinstall(struct drm_device * dev); 158extern void r128_driver_irq_preinstall(struct drm_device * dev);
156extern void r128_driver_irq_postinstall(struct drm_device * dev); 159extern int r128_driver_irq_postinstall(struct drm_device *dev);
157extern void r128_driver_irq_uninstall(struct drm_device * dev); 160extern void r128_driver_irq_uninstall(struct drm_device * dev);
158extern void r128_driver_lastclose(struct drm_device * dev); 161extern void r128_driver_lastclose(struct drm_device * dev);
159extern void r128_driver_preclose(struct drm_device * dev, 162extern void r128_driver_preclose(struct drm_device * dev,
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index c76fdca7662d..d7349012a680 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -35,6 +35,16 @@
35#include "r128_drm.h" 35#include "r128_drm.h"
36#include "r128_drv.h" 36#include "r128_drv.h"
37 37
38u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
39{
40 const drm_r128_private_t *dev_priv = dev->dev_private;
41
42 if (crtc != 0)
43 return 0;
44
45 return atomic_read(&dev_priv->vbl_received);
46}
47
38irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) 48irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
39{ 49{
40 struct drm_device *dev = (struct drm_device *) arg; 50 struct drm_device *dev = (struct drm_device *) arg;
@@ -46,30 +56,38 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
46 /* VBLANK interrupt */ 56 /* VBLANK interrupt */
47 if (status & R128_CRTC_VBLANK_INT) { 57 if (status & R128_CRTC_VBLANK_INT) {
48 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); 58 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
49 atomic_inc(&dev->vbl_received); 59 atomic_inc(&dev_priv->vbl_received);
50 DRM_WAKEUP(&dev->vbl_queue); 60 drm_handle_vblank(dev, 0);
51 drm_vbl_send_signals(dev);
52 return IRQ_HANDLED; 61 return IRQ_HANDLED;
53 } 62 }
54 return IRQ_NONE; 63 return IRQ_NONE;
55} 64}
56 65
57int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 66int r128_enable_vblank(struct drm_device *dev, int crtc)
58{ 67{
59 unsigned int cur_vblank; 68 drm_r128_private_t *dev_priv = dev->dev_private;
60 int ret = 0;
61 69
62 /* Assume that the user has missed the current sequence number 70 if (crtc != 0) {
63 * by about a day rather than she wants to wait for years 71 DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
64 * using vertical blanks... 72 return -EINVAL;
65 */ 73 }
66 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
67 (((cur_vblank = atomic_read(&dev->vbl_received))
68 - *sequence) <= (1 << 23)));
69 74
70 *sequence = cur_vblank; 75 R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
76 return 0;
77}
78
79void r128_disable_vblank(struct drm_device *dev, int crtc)
80{
81 if (crtc != 0)
82 DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
71 83
72 return ret; 84 /*
85 * FIXME: implement proper interrupt disable by using the vblank
86 * counter register (if available)
87 *
88 * R128_WRITE(R128_GEN_INT_CNTL,
89 * R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
90 */
73} 91}
74 92
75void r128_driver_irq_preinstall(struct drm_device * dev) 93void r128_driver_irq_preinstall(struct drm_device * dev)
@@ -82,12 +100,9 @@ void r128_driver_irq_preinstall(struct drm_device * dev)
82 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); 100 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
83} 101}
84 102
85void r128_driver_irq_postinstall(struct drm_device * dev) 103int r128_driver_irq_postinstall(struct drm_device *dev)
86{ 104{
87 drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; 105 return drm_vblank_init(dev, 1);
88
89 /* Turn on VBL interrupt */
90 R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
91} 106}
92 107
93void r128_driver_irq_uninstall(struct drm_device * dev) 108void r128_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 51a9afce7b9b..f7a5b5740764 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1629,7 +1629,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
1629 1629
1630 switch (param->param) { 1630 switch (param->param) {
1631 case R128_PARAM_IRQ_NR: 1631 case R128_PARAM_IRQ_NR:
1632 value = dev->irq; 1632 value = drm_dev_to_irq(dev);
1633 break; 1633 break;
1634 default: 1634 default:
1635 return -EINVAL; 1635 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 248ab4a7d39f..59a2132a8f57 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -71,7 +71,8 @@ static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
71 71
72static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) 72static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
73{ 73{
74 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) 74 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
75 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
75 return RS690_READ_MCIND(dev_priv, addr); 76 return RS690_READ_MCIND(dev_priv, addr);
76 else 77 else
77 return RS480_READ_MCIND(dev_priv, addr); 78 return RS480_READ_MCIND(dev_priv, addr);
@@ -82,7 +83,8 @@ u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
82 83
83 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 84 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
84 return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); 85 return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
85 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) 86 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
87 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
86 return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); 88 return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
87 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 89 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
88 return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); 90 return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
@@ -94,7 +96,8 @@ static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
94{ 96{
95 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 97 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
96 R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); 98 R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
97 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) 99 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
100 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
98 RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); 101 RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
99 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 102 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
100 R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); 103 R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
@@ -106,7 +109,8 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
106{ 109{
107 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 110 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
108 R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); 111 R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
109 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) 112 else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
113 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
110 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); 114 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
111 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 115 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
112 R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); 116 R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
@@ -122,15 +126,17 @@ static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
122 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { 126 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
123 R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); 127 R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
124 R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); 128 R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
125 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { 129 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
130 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
126 RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); 131 RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
127 RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); 132 RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
128 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { 133 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
129 R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); 134 R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
130 R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); 135 R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
131 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) { 136 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
137 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
132 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); 138 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
133 RADEON_WRITE(RS480_AGP_BASE_2, 0); 139 RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
134 } else { 140 } else {
135 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); 141 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
136 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) 142 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
@@ -347,6 +353,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
347 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || 353 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
348 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || 354 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
349 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || 355 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
356 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
350 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { 357 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
351 DRM_INFO("Loading R300 Microcode\n"); 358 DRM_INFO("Loading R300 Microcode\n");
352 for (i = 0; i < 256; i++) { 359 for (i = 0; i < 256; i++) {
@@ -356,6 +363,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
356 R300_cp_microcode[i][0]); 363 R300_cp_microcode[i][0]);
357 } 364 }
358 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || 365 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
366 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
359 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) { 367 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
360 DRM_INFO("Loading R400 Microcode\n"); 368 DRM_INFO("Loading R400 Microcode\n");
361 for (i = 0; i < 256; i++) { 369 for (i = 0; i < 256; i++) {
@@ -364,8 +372,9 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
364 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, 372 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
365 R420_cp_microcode[i][0]); 373 R420_cp_microcode[i][0]);
366 } 374 }
367 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { 375 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
368 DRM_INFO("Loading RS690 Microcode\n"); 376 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
377 DRM_INFO("Loading RS690/RS740 Microcode\n");
369 for (i = 0; i < 256; i++) { 378 for (i = 0; i < 256; i++) {
370 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, 379 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
371 RS690_cp_microcode[i][1]); 380 RS690_cp_microcode[i][1]);
@@ -626,8 +635,6 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
626 dev_priv->ring.size_l2qw); 635 dev_priv->ring.size_l2qw);
627#endif 636#endif
628 637
629 /* Start with assuming that writeback doesn't work */
630 dev_priv->writeback_works = 0;
631 638
632 /* Initialize the scratch register pointer. This will cause 639 /* Initialize the scratch register pointer. This will cause
633 * the scratch register values to be written out to memory 640 * the scratch register values to be written out to memory
@@ -646,8 +653,18 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
646 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); 653 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
647 654
648 /* Turn on bus mastering */ 655 /* Turn on bus mastering */
649 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 656 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
650 RADEON_WRITE(RADEON_BUS_CNTL, tmp); 657 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
658 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
659 /* rs400, rs690/rs740 */
660 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS400_BUS_MASTER_DIS;
661 RADEON_WRITE(RADEON_BUS_CNTL, tmp);
662 } else if (!(((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
663 ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R423))) {
664 /* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */
665 tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
666 RADEON_WRITE(RADEON_BUS_CNTL, tmp);
667	} /* PCIE cards appear not to need this */
651 668
652 dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; 669 dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
653 RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); 670 RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
@@ -674,6 +691,9 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
674{ 691{
675 u32 tmp; 692 u32 tmp;
676 693
694 /* Start with assuming that writeback doesn't work */
695 dev_priv->writeback_works = 0;
696
677 /* Writeback doesn't seem to work everywhere, test it here and possibly 697 /* Writeback doesn't seem to work everywhere, test it here and possibly
678 * enable it if it appears to work 698 * enable it if it appears to work
679 */ 699 */
@@ -719,7 +739,8 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
719 dev_priv->gart_size); 739 dev_priv->gart_size);
720 740
721 temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); 741 temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
722 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) 742 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
743 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
723 IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | 744 IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
724 RS690_BLOCK_GFX_D3_EN)); 745 RS690_BLOCK_GFX_D3_EN));
725 else 746 else
@@ -812,6 +833,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
812 u32 tmp; 833 u32 tmp;
813 834
814 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 835 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
836 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
815 (dev_priv->flags & RADEON_IS_IGPGART)) { 837 (dev_priv->flags & RADEON_IS_IGPGART)) {
816 radeon_set_igpgart(dev_priv, on); 838 radeon_set_igpgart(dev_priv, on);
817 return; 839 return;
@@ -1286,7 +1308,7 @@ static int radeon_do_resume_cp(struct drm_device * dev)
1286 radeon_cp_init_ring_buffer(dev, dev_priv); 1308 radeon_cp_init_ring_buffer(dev, dev_priv);
1287 1309
1288 radeon_do_engine_reset(dev); 1310 radeon_do_engine_reset(dev);
1289 radeon_enable_interrupt(dev); 1311 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
1290 1312
1291 DRM_DEBUG("radeon_do_resume_cp() complete\n"); 1313 DRM_DEBUG("radeon_do_resume_cp() complete\n");
1292 1314
@@ -1708,6 +1730,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
1708 case CHIP_R300: 1730 case CHIP_R300:
1709 case CHIP_R350: 1731 case CHIP_R350:
1710 case CHIP_R420: 1732 case CHIP_R420:
1733 case CHIP_R423:
1711 case CHIP_RV410: 1734 case CHIP_RV410:
1712 case CHIP_RV515: 1735 case CHIP_RV515:
1713 case CHIP_R520: 1736 case CHIP_R520:
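The hunks above repeat the (dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_X comparison many times; purely as an illustration, not something this patch adds, the checks could be tightened with a small predicate:

	static inline int radeon_family_is(const drm_radeon_private_t *dev_priv,
					   int family)
	{
		return (dev_priv->flags & RADEON_FAMILY_MASK) == family;
	}

	/* e.g. the IGP test becomes:
	 *	if (radeon_family_is(dev_priv, CHIP_RS690) ||
	 *	    radeon_family_is(dev_priv, CHIP_RS740))
	 */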
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 637bd7faf132..71af746a4e47 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -52,6 +52,28 @@ static int dri_library_name(struct drm_device *dev, char *buf)
52 "r300")); 52 "r300"));
53} 53}
54 54
55static int radeon_suspend(struct drm_device *dev, pm_message_t state)
56{
57 drm_radeon_private_t *dev_priv = dev->dev_private;
58
59 /* Disable *all* interrupts */
60 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
61 RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
62 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
63 return 0;
64}
65
66static int radeon_resume(struct drm_device *dev)
67{
68 drm_radeon_private_t *dev_priv = dev->dev_private;
69
70 /* Restore interrupt registers */
71 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
72 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
73 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
74 return 0;
75}
76
55static struct pci_device_id pciidlist[] = { 77static struct pci_device_id pciidlist[] = {
56 radeon_PCI_IDS 78 radeon_PCI_IDS
57}; 79};
@@ -59,8 +81,7 @@ static struct pci_device_id pciidlist[] = {
59static struct drm_driver driver = { 81static struct drm_driver driver = {
60 .driver_features = 82 .driver_features =
61 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 83 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
62 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | 84 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
63 DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
64 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 85 .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
65 .load = radeon_driver_load, 86 .load = radeon_driver_load,
66 .firstopen = radeon_driver_firstopen, 87 .firstopen = radeon_driver_firstopen,
@@ -69,8 +90,11 @@ static struct drm_driver driver = {
69 .postclose = radeon_driver_postclose, 90 .postclose = radeon_driver_postclose,
70 .lastclose = radeon_driver_lastclose, 91 .lastclose = radeon_driver_lastclose,
71 .unload = radeon_driver_unload, 92 .unload = radeon_driver_unload,
72 .vblank_wait = radeon_driver_vblank_wait, 93 .suspend = radeon_suspend,
73 .vblank_wait2 = radeon_driver_vblank_wait2, 94 .resume = radeon_resume,
95 .get_vblank_counter = radeon_get_vblank_counter,
96 .enable_vblank = radeon_enable_vblank,
97 .disable_vblank = radeon_disable_vblank,
74 .dri_library_name = dri_library_name, 98 .dri_library_name = dri_library_name,
75 .irq_preinstall = radeon_driver_irq_preinstall, 99 .irq_preinstall = radeon_driver_irq_preinstall,
76 .irq_postinstall = radeon_driver_irq_postinstall, 100 .irq_postinstall = radeon_driver_irq_postinstall,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 099381693175..4dbb813910c3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -122,9 +122,12 @@ enum radeon_family {
122 CHIP_RV350, 122 CHIP_RV350,
123 CHIP_RV380, 123 CHIP_RV380,
124 CHIP_R420, 124 CHIP_R420,
125 CHIP_R423,
125 CHIP_RV410, 126 CHIP_RV410,
127 CHIP_RS400,
126 CHIP_RS480, 128 CHIP_RS480,
127 CHIP_RS690, 129 CHIP_RS690,
130 CHIP_RS740,
128 CHIP_RV515, 131 CHIP_RV515,
129 CHIP_R520, 132 CHIP_R520,
130 CHIP_RV530, 133 CHIP_RV530,
@@ -378,17 +381,17 @@ extern void radeon_mem_release(struct drm_file *file_priv,
378 struct mem_block *heap); 381 struct mem_block *heap);
379 382
380 /* radeon_irq.c */ 383 /* radeon_irq.c */
384extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
381extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); 385extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
382extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); 386extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
383 387
384extern void radeon_do_release(struct drm_device * dev); 388extern void radeon_do_release(struct drm_device * dev);
385extern int radeon_driver_vblank_wait(struct drm_device * dev, 389extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
386 unsigned int *sequence); 390extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
387extern int radeon_driver_vblank_wait2(struct drm_device * dev, 391extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
388 unsigned int *sequence);
389extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); 392extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
390extern void radeon_driver_irq_preinstall(struct drm_device * dev); 393extern void radeon_driver_irq_preinstall(struct drm_device * dev);
391extern void radeon_driver_irq_postinstall(struct drm_device * dev); 394extern int radeon_driver_irq_postinstall(struct drm_device *dev);
392extern void radeon_driver_irq_uninstall(struct drm_device * dev); 395extern void radeon_driver_irq_uninstall(struct drm_device * dev);
393extern void radeon_enable_interrupt(struct drm_device *dev); 396extern void radeon_enable_interrupt(struct drm_device *dev);
394extern int radeon_vblank_crtc_get(struct drm_device *dev); 397extern int radeon_vblank_crtc_get(struct drm_device *dev);
@@ -397,19 +400,22 @@ extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
397extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); 400extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
398extern int radeon_driver_unload(struct drm_device *dev); 401extern int radeon_driver_unload(struct drm_device *dev);
399extern int radeon_driver_firstopen(struct drm_device *dev); 402extern int radeon_driver_firstopen(struct drm_device *dev);
400extern void radeon_driver_preclose(struct drm_device * dev, struct drm_file *file_priv); 403extern void radeon_driver_preclose(struct drm_device *dev,
401extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp); 404 struct drm_file *file_priv);
405extern void radeon_driver_postclose(struct drm_device *dev,
406 struct drm_file *file_priv);
402extern void radeon_driver_lastclose(struct drm_device * dev); 407extern void radeon_driver_lastclose(struct drm_device * dev);
403extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv); 408extern int radeon_driver_open(struct drm_device *dev,
409 struct drm_file *file_priv);
404extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, 410extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
405 unsigned long arg); 411 unsigned long arg);
406 412
407/* r300_cmdbuf.c */ 413/* r300_cmdbuf.c */
408extern void r300_init_reg_flags(struct drm_device *dev); 414extern void r300_init_reg_flags(struct drm_device *dev);
409 415
410extern int r300_do_cp_cmdbuf(struct drm_device * dev, 416extern int r300_do_cp_cmdbuf(struct drm_device *dev,
411 struct drm_file *file_priv, 417 struct drm_file *file_priv,
412 drm_radeon_kcmd_buffer_t * cmdbuf); 418 drm_radeon_kcmd_buffer_t *cmdbuf);
413 419
414/* Flags for stats.boxes 420/* Flags for stats.boxes
415 */ 421 */
@@ -434,8 +440,31 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
434# define RADEON_SCISSOR_1_ENABLE (1 << 29) 440# define RADEON_SCISSOR_1_ENABLE (1 << 29)
435# define RADEON_SCISSOR_2_ENABLE (1 << 30) 441# define RADEON_SCISSOR_2_ENABLE (1 << 30)
436 442
443/*
444 * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx)
445 * don't have an explicit bus mastering disable bit. It's handled
446 * by the PCI D-states. PMI_BM_DIS disables D-state bus master
447 * handling, not bus mastering itself.
448 */
437#define RADEON_BUS_CNTL 0x0030 449#define RADEON_BUS_CNTL 0x0030
450/* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */
438# define RADEON_BUS_MASTER_DIS (1 << 6) 451# define RADEON_BUS_MASTER_DIS (1 << 6)
452/* rs400, rs690/rs740 */
453# define RS400_BUS_MASTER_DIS (1 << 14)
454# define RS400_MSI_REARM (1 << 20)
455/* see RS480_MSI_REARM in AIC_CNTL for rs480 */
456
457#define RADEON_BUS_CNTL1 0x0034
458# define RADEON_PMI_BM_DIS (1 << 2)
459# define RADEON_PMI_INT_DIS (1 << 3)
460
461#define RV370_BUS_CNTL 0x004c
462# define RV370_PMI_BM_DIS (1 << 5)
463# define RV370_PMI_INT_DIS (1 << 6)
464
465#define RADEON_MSI_REARM_EN 0x0160
466/* rv370/rv380, rv410, r423/r430/r480, r5xx */
467# define RV370_MSI_REARM_EN (1 << 0)
439 468
440#define RADEON_CLOCK_CNTL_DATA 0x000c 469#define RADEON_CLOCK_CNTL_DATA 0x000c
441# define RADEON_PLL_WR_EN (1 << 7) 470# define RADEON_PLL_WR_EN (1 << 7)
@@ -623,6 +652,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
623# define RADEON_SW_INT_TEST (1 << 25) 652# define RADEON_SW_INT_TEST (1 << 25)
624# define RADEON_SW_INT_TEST_ACK (1 << 25) 653# define RADEON_SW_INT_TEST_ACK (1 << 25)
625# define RADEON_SW_INT_FIRE (1 << 26) 654# define RADEON_SW_INT_FIRE (1 << 26)
655# define R500_DISPLAY_INT_STATUS (1 << 0)
626 656
627#define RADEON_HOST_PATH_CNTL 0x0130 657#define RADEON_HOST_PATH_CNTL 0x0130
628# define RADEON_HDP_SOFT_RESET (1 << 26) 658# define RADEON_HDP_SOFT_RESET (1 << 26)
@@ -907,6 +937,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
907 937
908#define RADEON_AIC_CNTL 0x01d0 938#define RADEON_AIC_CNTL 0x01d0
909# define RADEON_PCIGART_TRANSLATE_EN (1 << 0) 939# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
940# define RS480_MSI_REARM (1 << 3)
910#define RADEON_AIC_STAT 0x01d4 941#define RADEON_AIC_STAT 0x01d4
911#define RADEON_AIC_PT_BASE 0x01d8 942#define RADEON_AIC_PT_BASE 0x01d8
912#define RADEON_AIC_LO_ADDR 0x01dc 943#define RADEON_AIC_LO_ADDR 0x01dc
@@ -1116,6 +1147,9 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
1116 1147
1117#define R200_VAP_PVS_CNTL_1 0x22D0 1148#define R200_VAP_PVS_CNTL_1 0x22D0
1118 1149
1150#define RADEON_CRTC_CRNT_FRAME 0x0214
1151#define RADEON_CRTC2_CRNT_FRAME 0x0314
1152
1119#define R500_D1CRTC_STATUS 0x609c 1153#define R500_D1CRTC_STATUS 0x609c
1120#define R500_D2CRTC_STATUS 0x689c 1154#define R500_D2CRTC_STATUS 0x689c
1121#define R500_CRTC_V_BLANK (1<<0) 1155#define R500_CRTC_V_BLANK (1<<0)
@@ -1200,7 +1234,8 @@ do { \
1200 1234
1201#define IGP_WRITE_MCIND(addr, val) \ 1235#define IGP_WRITE_MCIND(addr, val) \
1202do { \ 1236do { \
1203 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \ 1237 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || \
1238 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) \
1204 RS690_WRITE_MCIND(addr, val); \ 1239 RS690_WRITE_MCIND(addr, val); \
1205 else \ 1240 else \
1206 RS480_WRITE_MCIND(addr, val); \ 1241 RS480_WRITE_MCIND(addr, val); \
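
Note: the register comments added above encode three different schemes, keyed off the family bits. A sketch of how a driver might dispatch on them when masking bus mastering (radeon_bus_master_disable() is a hypothetical helper, not part of this patch, and the family grouping is inferred from the comments above):

static void radeon_bus_master_disable(drm_radeon_private_t *dev_priv)
{
	u32 family = dev_priv->flags & RADEON_FAMILY_MASK;

	switch (family) {
	case CHIP_RS400:
	case CHIP_RS690:
	case CHIP_RS740:
		/* IGP parts: bit 14 of BUS_CNTL. */
		RADEON_WRITE(RADEON_BUS_CNTL,
			     RADEON_READ(RADEON_BUS_CNTL) | RS400_BUS_MASTER_DIS);
		break;
	case CHIP_RV380:
	case CHIP_RV410:
	case CHIP_R423:
	case CHIP_RV515:
	case CHIP_R520:
	case CHIP_RV530:
		/* PCIE parts: no explicit disable bit; bus mastering is
		 * handled by the PCI D-states, and PMI_BM_DIS only turns
		 * off the D-state handling. Nothing to write. */
		break;
	default:
		/* r1xx-r4xx PCI/AGP parts: bit 6 of BUS_CNTL. */
		RADEON_WRITE(RADEON_BUS_CNTL,
			     RADEON_READ(RADEON_BUS_CNTL) | RADEON_BUS_MASTER_DIS);
		break;
	}
}
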
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index ee40d197deb7..5079f7054a2f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -27,7 +27,7 @@
27 * 27 *
28 * Authors: 28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com> 29 * Keith Whitwell <keith@tungstengraphics.com>
 30 * Michel Dänzer <michel@daenzer.net> 30 * Michel Dänzer <michel@daenzer.net>
31 */ 31 */
32 32
33#include "drmP.h" 33#include "drmP.h"
@@ -35,12 +35,128 @@
35#include "radeon_drm.h" 35#include "radeon_drm.h"
36#include "radeon_drv.h" 36#include "radeon_drv.h"
37 37
38static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, 38void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
39 u32 mask)
40{ 39{
41 u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask; 40 drm_radeon_private_t *dev_priv = dev->dev_private;
41
42 if (state)
43 dev_priv->irq_enable_reg |= mask;
44 else
45 dev_priv->irq_enable_reg &= ~mask;
46
47 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
48}
49
50static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
51{
52 drm_radeon_private_t *dev_priv = dev->dev_private;
53
54 if (state)
55 dev_priv->r500_disp_irq_reg |= mask;
56 else
57 dev_priv->r500_disp_irq_reg &= ~mask;
58
59 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
60}
61
62int radeon_enable_vblank(struct drm_device *dev, int crtc)
63{
64 drm_radeon_private_t *dev_priv = dev->dev_private;
65
66 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
67 switch (crtc) {
68 case 0:
69 r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
70 break;
71 case 1:
72 r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
73 break;
74 default:
75 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
76 crtc);
 77 return -EINVAL;
78 }
79 } else {
80 switch (crtc) {
81 case 0:
82 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
83 break;
84 case 1:
85 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
86 break;
87 default:
88 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
89 crtc);
 90 return -EINVAL;
91 }
92 }
93
94 return 0;
95}
96
97void radeon_disable_vblank(struct drm_device *dev, int crtc)
98{
99 drm_radeon_private_t *dev_priv = dev->dev_private;
100
101 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
102 switch (crtc) {
103 case 0:
104 r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
105 break;
106 case 1:
107 r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
108 break;
109 default:
 110 DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
111 crtc);
112 break;
113 }
114 } else {
115 switch (crtc) {
116 case 0:
117 radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
118 break;
119 case 1:
120 radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
121 break;
122 default:
 123 DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
124 crtc);
125 break;
126 }
127 }
128}
129
130static inline u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
131{
132 u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
133 u32 irq_mask = RADEON_SW_INT_TEST;
134
135 *r500_disp_int = 0;
136 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
137 /* vbl interrupts in a different place */
138
139 if (irqs & R500_DISPLAY_INT_STATUS) {
140 /* if a display interrupt */
141 u32 disp_irq;
142
143 disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
144
145 *r500_disp_int = disp_irq;
146 if (disp_irq & R500_D1_VBLANK_INTERRUPT)
147 RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
148 if (disp_irq & R500_D2_VBLANK_INTERRUPT)
149 RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
150 }
151 irq_mask |= R500_DISPLAY_INT_STATUS;
152 } else
153 irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
154
155 irqs &= irq_mask;
156
42 if (irqs) 157 if (irqs)
43 RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs); 158 RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
159
44 return irqs; 160 return irqs;
45} 161}
46 162
@@ -68,44 +184,33 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
68 drm_radeon_private_t *dev_priv = 184 drm_radeon_private_t *dev_priv =
69 (drm_radeon_private_t *) dev->dev_private; 185 (drm_radeon_private_t *) dev->dev_private;
70 u32 stat; 186 u32 stat;
187 u32 r500_disp_int;
71 188
72 /* Only consider the bits we're interested in - others could be used 189 /* Only consider the bits we're interested in - others could be used
73 * outside the DRM 190 * outside the DRM
74 */ 191 */
75 stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | 192 stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
76 RADEON_CRTC_VBLANK_STAT |
77 RADEON_CRTC2_VBLANK_STAT));
78 if (!stat) 193 if (!stat)
79 return IRQ_NONE; 194 return IRQ_NONE;
80 195
81 stat &= dev_priv->irq_enable_reg; 196 stat &= dev_priv->irq_enable_reg;
82 197
83 /* SW interrupt */ 198 /* SW interrupt */
84 if (stat & RADEON_SW_INT_TEST) { 199 if (stat & RADEON_SW_INT_TEST)
85 DRM_WAKEUP(&dev_priv->swi_queue); 200 DRM_WAKEUP(&dev_priv->swi_queue);
86 }
87 201
88 /* VBLANK interrupt */ 202 /* VBLANK interrupt */
89 if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) { 203 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
90 int vblank_crtc = dev_priv->vblank_crtc; 204 if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
91 205 drm_handle_vblank(dev, 0);
92 if ((vblank_crtc & 206 if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
93 (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) == 207 drm_handle_vblank(dev, 1);
94 (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { 208 } else {
95 if (stat & RADEON_CRTC_VBLANK_STAT) 209 if (stat & RADEON_CRTC_VBLANK_STAT)
96 atomic_inc(&dev->vbl_received); 210 drm_handle_vblank(dev, 0);
97 if (stat & RADEON_CRTC2_VBLANK_STAT) 211 if (stat & RADEON_CRTC2_VBLANK_STAT)
98 atomic_inc(&dev->vbl_received2); 212 drm_handle_vblank(dev, 1);
99 } else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
100 (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
101 ((stat & RADEON_CRTC2_VBLANK_STAT) &&
102 (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
103 atomic_inc(&dev->vbl_received);
104
105 DRM_WAKEUP(&dev->vbl_queue);
106 drm_vbl_send_signals(dev);
107 } 213 }
108
109 return IRQ_HANDLED; 214 return IRQ_HANDLED;
110} 215}
111 216
@@ -144,54 +249,31 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
144 return ret; 249 return ret;
145} 250}
146 251
147static int radeon_driver_vblank_do_wait(struct drm_device * dev, 252u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
148 unsigned int *sequence, int crtc)
149{ 253{
150 drm_radeon_private_t *dev_priv = 254 drm_radeon_private_t *dev_priv = dev->dev_private;
151 (drm_radeon_private_t *) dev->dev_private; 255
152 unsigned int cur_vblank;
153 int ret = 0;
154 int ack = 0;
155 atomic_t *counter;
156 if (!dev_priv) { 256 if (!dev_priv) {
157 DRM_ERROR("called with no initialization\n"); 257 DRM_ERROR("called with no initialization\n");
158 return -EINVAL; 258 return -EINVAL;
159 } 259 }
160 260
161 if (crtc == DRM_RADEON_VBLANK_CRTC1) { 261 if (crtc < 0 || crtc > 1) {
162 counter = &dev->vbl_received; 262 DRM_ERROR("Invalid crtc %d\n", crtc);
163 ack |= RADEON_CRTC_VBLANK_STAT;
164 } else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
165 counter = &dev->vbl_received2;
166 ack |= RADEON_CRTC2_VBLANK_STAT;
167 } else
168 return -EINVAL; 263 return -EINVAL;
264 }
169 265
170 radeon_acknowledge_irqs(dev_priv, ack); 266 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
171 267 if (crtc == 0)
172 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; 268 return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
173 269 else
174 /* Assume that the user has missed the current sequence number 270 return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
175 * by about a day rather than she wants to wait for years 271 } else {
176 * using vertical blanks... 272 if (crtc == 0)
177 */ 273 return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
178 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 274 else
179 (((cur_vblank = atomic_read(counter)) 275 return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
180 - *sequence) <= (1 << 23))); 276 }
181
182 *sequence = cur_vblank;
183
184 return ret;
185}
186
187int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
188{
189 return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
190}
191
192int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
193{
194 return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
195} 277}
196 278
197/* Needs the lock as it touches the ring. 279/* Needs the lock as it touches the ring.
@@ -234,46 +316,41 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
234 return radeon_wait_irq(dev, irqwait->irq_seq); 316 return radeon_wait_irq(dev, irqwait->irq_seq);
235} 317}
236 318
237void radeon_enable_interrupt(struct drm_device *dev)
238{
239 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
240
241 dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
242 if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
243 dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
244
245 if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
246 dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
247
248 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
249 dev_priv->irq_enabled = 1;
250}
251
252/* drm_dma.h hooks 319/* drm_dma.h hooks
253*/ 320*/
254void radeon_driver_irq_preinstall(struct drm_device * dev) 321void radeon_driver_irq_preinstall(struct drm_device * dev)
255{ 322{
256 drm_radeon_private_t *dev_priv = 323 drm_radeon_private_t *dev_priv =
257 (drm_radeon_private_t *) dev->dev_private; 324 (drm_radeon_private_t *) dev->dev_private;
325 u32 dummy;
258 326
259 /* Disable *all* interrupts */ 327 /* Disable *all* interrupts */
328 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
329 RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
260 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); 330 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
261 331
262 /* Clear bits if they're already high */ 332 /* Clear bits if they're already high */
263 radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | 333 radeon_acknowledge_irqs(dev_priv, &dummy);
264 RADEON_CRTC_VBLANK_STAT |
265 RADEON_CRTC2_VBLANK_STAT));
266} 334}
267 335
268void radeon_driver_irq_postinstall(struct drm_device * dev) 336int radeon_driver_irq_postinstall(struct drm_device *dev)
269{ 337{
270 drm_radeon_private_t *dev_priv = 338 drm_radeon_private_t *dev_priv =
271 (drm_radeon_private_t *) dev->dev_private; 339 (drm_radeon_private_t *) dev->dev_private;
340 int ret;
272 341
273 atomic_set(&dev_priv->swi_emitted, 0); 342 atomic_set(&dev_priv->swi_emitted, 0);
274 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); 343 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
275 344
276 radeon_enable_interrupt(dev); 345 ret = drm_vblank_init(dev, 2);
346 if (ret)
347 return ret;
348
349 dev->max_vblank_count = 0x001fffff;
350
351 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
352
353 return 0;
277} 354}
278 355
279void radeon_driver_irq_uninstall(struct drm_device * dev) 356void radeon_driver_irq_uninstall(struct drm_device * dev)
@@ -285,6 +362,8 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
285 362
286 dev_priv->irq_enabled = 0; 363 dev_priv->irq_enabled = 0;
287 364
365 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
366 RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
288 /* Disable *all* interrupts */ 367 /* Disable *all* interrupts */
289 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); 368 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
290} 369}
@@ -293,18 +372,8 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
293int radeon_vblank_crtc_get(struct drm_device *dev) 372int radeon_vblank_crtc_get(struct drm_device *dev)
294{ 373{
295 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; 374 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
296 u32 flag;
297 u32 value;
298
299 flag = RADEON_READ(RADEON_GEN_INT_CNTL);
300 value = 0;
301
302 if (flag & RADEON_CRTC_VBLANK_MASK)
303 value |= DRM_RADEON_VBLANK_CRTC1;
304 375
305 if (flag & RADEON_CRTC2_VBLANK_MASK) 376 return dev_priv->vblank_crtc;
306 value |= DRM_RADEON_VBLANK_CRTC2;
307 return value;
308} 377}
309 378
310int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) 379int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
@@ -315,6 +384,5 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
315 return -EINVAL; 384 return -EINVAL;
316 } 385 }
317 dev_priv->vblank_crtc = (unsigned int)value; 386 dev_priv->vblank_crtc = (unsigned int)value;
318 radeon_enable_interrupt(dev);
319 return 0; 387 return 0;
320} 388}
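
Note: the radeon conversion above captures the driver-side contract of the new core vblank code in one place: per-CRTC enable/disable hooks, a raw counter read, and a call to drm_handle_vblank() from the interrupt handler, with the old vbl_queue/vbl_received plumbing deleted. A condensed sketch of that contract for a hypothetical single-CRTC driver follows; every FOO_* name is a placeholder, not something defined by this patch.

static int foo_enable_vblank(struct drm_device *dev, int crtc)
{
	drm_foo_private_t *dev_priv = dev->dev_private;

	if (crtc != 0)
		return -EINVAL;
	/* Unmask the hardware vblank interrupt. */
	FOO_WRITE(FOO_INT_CNTL, FOO_READ(FOO_INT_CNTL) | FOO_VBLANK_MASK);
	return 0;
}

static void foo_disable_vblank(struct drm_device *dev, int crtc)
{
	drm_foo_private_t *dev_priv = dev->dev_private;

	/* Mask the hardware vblank interrupt again; the core calls this
	 * once the last drm_vblank_get() reference has been dropped. */
	FOO_WRITE(FOO_INT_CNTL, FOO_READ(FOO_INT_CNTL) & ~FOO_VBLANK_MASK);
}

static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
{
	drm_foo_private_t *dev_priv = dev->dev_private;

	/* Hardware frame counter. A device without one returns a count
	 * kept by its own interrupt handler and leaves the interrupt
	 * permanently enabled (see the drmP.h kerneldoc later in this
	 * patch). */
	return FOO_READ(FOO_CRTC_FRAME_COUNT);
}

irqreturn_t foo_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_foo_private_t *dev_priv = dev->dev_private;
	u32 stat = FOO_READ(FOO_INT_STATUS);

	if (!(stat & FOO_VBLANK_STAT))
		return IRQ_NONE;

	FOO_WRITE(FOO_INT_STATUS, stat);	/* acknowledge */
	drm_handle_vblank(dev, 0);		/* core bumps the count and wakes waiters */
	return IRQ_HANDLED;
}

static int foo_driver_irq_postinstall(struct drm_device *dev)
{
	/* One counter per CRTC; must run before any vblank ioctl. */
	return drm_vblank_init(dev, 1);
}

radeon additionally sets dev->max_vblank_count to the width of its frame-count registers (0x001fffff) so the core can handle counter wraparound.
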
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 11c146b49211..5d7153fcc7b0 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -2997,7 +2997,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
2997 value = GET_SCRATCH(2); 2997 value = GET_SCRATCH(2);
2998 break; 2998 break;
2999 case RADEON_PARAM_IRQ_NR: 2999 case RADEON_PARAM_IRQ_NR:
3000 value = dev->irq; 3000 value = drm_dev_to_irq(dev);
3001 break; 3001 break;
3002 case RADEON_PARAM_GART_BASE: 3002 case RADEON_PARAM_GART_BASE:
3003 value = dev_priv->gart_vm_start; 3003 value = dev_priv->gart_vm_start;
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index b3878770fce1..af22111397d8 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -41,7 +41,7 @@
41#define AGP_TYPE 1 41#define AGP_TYPE 1
42 42
43 43
44#if defined(CONFIG_FB_SIS) 44#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
45/* fb management via fb device */ 45/* fb management via fb device */
46 46
47#define SIS_MM_ALIGN_SHIFT 0 47#define SIS_MM_ALIGN_SHIFT 0
@@ -57,7 +57,7 @@ static void *sis_sman_mm_allocate(void *private, unsigned long size,
57 if (req.size == 0) 57 if (req.size == 0)
58 return NULL; 58 return NULL;
59 else 59 else
60 return (void *)~req.offset; 60 return (void *)(unsigned long)~req.offset;
61} 61}
62 62
63static void sis_sman_mm_free(void *private, void *ref) 63static void sis_sman_mm_free(void *private, void *ref)
@@ -75,12 +75,12 @@ static unsigned long sis_sman_mm_offset(void *private, void *ref)
75 return ~((unsigned long)ref); 75 return ~((unsigned long)ref);
76} 76}
77 77
78#else /* CONFIG_FB_SIS */ 78#else /* CONFIG_FB_SIS[_MODULE] */
79 79
80#define SIS_MM_ALIGN_SHIFT 4 80#define SIS_MM_ALIGN_SHIFT 4
81#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1) 81#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
82 82
83#endif /* CONFIG_FB_SIS */ 83#endif /* CONFIG_FB_SIS[_MODULE] */
84 84
85static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 85static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
86{ 86{
@@ -89,7 +89,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
89 int ret; 89 int ret;
90 90
91 mutex_lock(&dev->struct_mutex); 91 mutex_lock(&dev->struct_mutex);
92#if defined(CONFIG_FB_SIS) 92#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
93 { 93 {
94 struct drm_sman_mm sman_mm; 94 struct drm_sman_mm sman_mm;
95 sman_mm.private = (void *)0xFFFFFFFF; 95 sman_mm.private = (void *)0xFFFFFFFF;
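
Note: the new (unsigned long) hop in sis_sman_mm_allocate() keeps the integer-to-pointer conversion at pointer width, silencing cast warnings on 64-bit builds. The complement itself exists so that a block at offset 0 still encodes to a non-NULL ref, since NULL is the allocator's failure return (an inference from the req.size check above, not something the patch states). The round trip, as a two-line sketch:

	void *ref = (void *)(unsigned long)~req.offset;	/* encode, cf. sis_sman_mm_allocate() */
	unsigned long offset = ~((unsigned long)ref);	/* decode, cf. sis_sman_mm_offset() */
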
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 80c01cdfa37d..0993b441fc42 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -40,11 +40,13 @@ static struct pci_device_id pciidlist[] = {
40static struct drm_driver driver = { 40static struct drm_driver driver = {
41 .driver_features = 41 .driver_features =
42 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 42 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
43 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 43 DRIVER_IRQ_SHARED,
44 .load = via_driver_load, 44 .load = via_driver_load,
45 .unload = via_driver_unload, 45 .unload = via_driver_unload,
46 .context_dtor = via_final_context, 46 .context_dtor = via_final_context,
47 .vblank_wait = via_driver_vblank_wait, 47 .get_vblank_counter = via_get_vblank_counter,
48 .enable_vblank = via_enable_vblank,
49 .disable_vblank = via_disable_vblank,
48 .irq_preinstall = via_driver_irq_preinstall, 50 .irq_preinstall = via_driver_irq_preinstall,
49 .irq_postinstall = via_driver_irq_postinstall, 51 .irq_postinstall = via_driver_irq_postinstall,
50 .irq_uninstall = via_driver_irq_uninstall, 52 .irq_uninstall = via_driver_irq_uninstall,
@@ -59,17 +61,17 @@ static struct drm_driver driver = {
59 .get_reg_ofs = drm_core_get_reg_ofs, 61 .get_reg_ofs = drm_core_get_reg_ofs,
60 .ioctls = via_ioctls, 62 .ioctls = via_ioctls,
61 .fops = { 63 .fops = {
62 .owner = THIS_MODULE, 64 .owner = THIS_MODULE,
63 .open = drm_open, 65 .open = drm_open,
64 .release = drm_release, 66 .release = drm_release,
65 .ioctl = drm_ioctl, 67 .ioctl = drm_ioctl,
66 .mmap = drm_mmap, 68 .mmap = drm_mmap,
67 .poll = drm_poll, 69 .poll = drm_poll,
68 .fasync = drm_fasync, 70 .fasync = drm_fasync,
69 }, 71 },
70 .pci_driver = { 72 .pci_driver = {
71 .name = DRIVER_NAME, 73 .name = DRIVER_NAME,
72 .id_table = pciidlist, 74 .id_table = pciidlist,
73 }, 75 },
74 76
75 .name = DRIVER_NAME, 77 .name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 2daae81874cd..cafcb844a223 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -75,6 +75,7 @@ typedef struct drm_via_private {
75 struct timeval last_vblank; 75 struct timeval last_vblank;
76 int last_vblank_valid; 76 int last_vblank_valid;
77 unsigned usec_per_vblank; 77 unsigned usec_per_vblank;
78 atomic_t vbl_received;
78 drm_via_state_t hc_state; 79 drm_via_state_t hc_state;
79 char pci_buf[VIA_PCI_BUF_SIZE]; 80 char pci_buf[VIA_PCI_BUF_SIZE];
80 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; 81 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
@@ -130,21 +131,24 @@ extern int via_init_context(struct drm_device * dev, int context);
130extern int via_final_context(struct drm_device * dev, int context); 131extern int via_final_context(struct drm_device * dev, int context);
131 132
132extern int via_do_cleanup_map(struct drm_device * dev); 133extern int via_do_cleanup_map(struct drm_device * dev);
133extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 134extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
135extern int via_enable_vblank(struct drm_device *dev, int crtc);
136extern void via_disable_vblank(struct drm_device *dev, int crtc);
134 137
135extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); 138extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
136extern void via_driver_irq_preinstall(struct drm_device * dev); 139extern void via_driver_irq_preinstall(struct drm_device * dev);
137extern void via_driver_irq_postinstall(struct drm_device * dev); 140extern int via_driver_irq_postinstall(struct drm_device *dev);
138extern void via_driver_irq_uninstall(struct drm_device * dev); 141extern void via_driver_irq_uninstall(struct drm_device * dev);
139 142
140extern int via_dma_cleanup(struct drm_device * dev); 143extern int via_dma_cleanup(struct drm_device * dev);
141extern void via_init_command_verifier(void); 144extern void via_init_command_verifier(void);
142extern int via_driver_dma_quiescent(struct drm_device * dev); 145extern int via_driver_dma_quiescent(struct drm_device * dev);
143extern void via_init_futex(drm_via_private_t * dev_priv); 146extern void via_init_futex(drm_via_private_t *dev_priv);
144extern void via_cleanup_futex(drm_via_private_t * dev_priv); 147extern void via_cleanup_futex(drm_via_private_t *dev_priv);
145extern void via_release_futex(drm_via_private_t * dev_priv, int context); 148extern void via_release_futex(drm_via_private_t *dev_priv, int context);
146 149
147extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv); 150extern void via_reclaim_buffers_locked(struct drm_device *dev,
151 struct drm_file *file_priv);
148extern void via_lastclose(struct drm_device *dev); 152extern void via_lastclose(struct drm_device *dev);
149 153
150extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq); 154extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index c6bb978a1106..665d319b927b 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -43,7 +43,7 @@
43#define VIA_REG_INTERRUPT 0x200 43#define VIA_REG_INTERRUPT 0x200
44 44
45/* VIA_REG_INTERRUPT */ 45/* VIA_REG_INTERRUPT */
46#define VIA_IRQ_GLOBAL (1 << 31) 46#define VIA_IRQ_GLOBAL (1 << 31)
47#define VIA_IRQ_VBLANK_ENABLE (1 << 19) 47#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
48#define VIA_IRQ_VBLANK_PENDING (1 << 3) 48#define VIA_IRQ_VBLANK_PENDING (1 << 3)
49#define VIA_IRQ_HQV0_ENABLE (1 << 11) 49#define VIA_IRQ_HQV0_ENABLE (1 << 11)
@@ -68,16 +68,15 @@
68 68
69static maskarray_t via_pro_group_a_irqs[] = { 69static maskarray_t via_pro_group_a_irqs[] = {
70 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 70 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
71 0x00000000}, 71 0x00000000 },
72 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 72 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
73 0x00000000}, 73 0x00000000 },
74 {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, 74 {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
75 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 75 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
76 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, 76 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
77 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 77 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
78}; 78};
79static int via_num_pro_group_a = 79static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
80 sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
81static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3}; 80static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
82 81
83static maskarray_t via_unichrome_irqs[] = { 82static maskarray_t via_unichrome_irqs[] = {
@@ -86,14 +85,24 @@ static maskarray_t via_unichrome_irqs[] = {
86 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, 85 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
87 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008} 86 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
88}; 87};
89static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t); 88static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
90static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; 89static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
91 90
91
92static unsigned time_diff(struct timeval *now, struct timeval *then) 92static unsigned time_diff(struct timeval *now, struct timeval *then)
93{ 93{
94 return (now->tv_usec >= then->tv_usec) ? 94 return (now->tv_usec >= then->tv_usec) ?
95 now->tv_usec - then->tv_usec : 95 now->tv_usec - then->tv_usec :
96 1000000 - (then->tv_usec - now->tv_usec); 96 1000000 - (then->tv_usec - now->tv_usec);
97}
98
99u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
100{
101 drm_via_private_t *dev_priv = dev->dev_private;
102 if (crtc != 0)
103 return 0;
104
105 return atomic_read(&dev_priv->vbl_received);
97} 106}
98 107
99irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) 108irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
@@ -108,23 +117,22 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
108 117
109 status = VIA_READ(VIA_REG_INTERRUPT); 118 status = VIA_READ(VIA_REG_INTERRUPT);
110 if (status & VIA_IRQ_VBLANK_PENDING) { 119 if (status & VIA_IRQ_VBLANK_PENDING) {
111 atomic_inc(&dev->vbl_received); 120 atomic_inc(&dev_priv->vbl_received);
112 if (!(atomic_read(&dev->vbl_received) & 0x0F)) { 121 if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
113 do_gettimeofday(&cur_vblank); 122 do_gettimeofday(&cur_vblank);
114 if (dev_priv->last_vblank_valid) { 123 if (dev_priv->last_vblank_valid) {
115 dev_priv->usec_per_vblank = 124 dev_priv->usec_per_vblank =
116 time_diff(&cur_vblank, 125 time_diff(&cur_vblank,
117 &dev_priv->last_vblank) >> 4; 126 &dev_priv->last_vblank) >> 4;
118 } 127 }
119 dev_priv->last_vblank = cur_vblank; 128 dev_priv->last_vblank = cur_vblank;
120 dev_priv->last_vblank_valid = 1; 129 dev_priv->last_vblank_valid = 1;
121 } 130 }
122 if (!(atomic_read(&dev->vbl_received) & 0xFF)) { 131 if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
123 DRM_DEBUG("US per vblank is: %u\n", 132 DRM_DEBUG("US per vblank is: %u\n",
124 dev_priv->usec_per_vblank); 133 dev_priv->usec_per_vblank);
125 } 134 }
126 DRM_WAKEUP(&dev->vbl_queue); 135 drm_handle_vblank(dev, 0);
127 drm_vbl_send_signals(dev);
128 handled = 1; 136 handled = 1;
129 } 137 }
130 138
@@ -145,6 +153,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 145 /* Acknowledge interrupts */ 153 /* Acknowledge interrupts */
146 VIA_WRITE(VIA_REG_INTERRUPT, status); 154 VIA_WRITE(VIA_REG_INTERRUPT, status);
147 155
156
148 if (handled) 157 if (handled)
149 return IRQ_HANDLED; 158 return IRQ_HANDLED;
150 else 159 else
@@ -163,31 +172,34 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
163 } 172 }
164} 173}
165 174
166int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 175int via_enable_vblank(struct drm_device *dev, int crtc)
167{ 176{
168 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 177 drm_via_private_t *dev_priv = dev->dev_private;
169 unsigned int cur_vblank; 178 u32 status;
170 int ret = 0;
171 179
172 DRM_DEBUG("\n"); 180 if (crtc != 0) {
173 if (!dev_priv) { 181 DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
174 DRM_ERROR("called with no initialization\n");
175 return -EINVAL; 182 return -EINVAL;
176 } 183 }
177 184
178 viadrv_acknowledge_irqs(dev_priv); 185 status = VIA_READ(VIA_REG_INTERRUPT);
 186 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
187
188 VIA_WRITE8(0x83d4, 0x11);
189 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
179 190
180 /* Assume that the user has missed the current sequence number 191 return 0;
181 * by about a day rather than she wants to wait for years 192}
182 * using vertical blanks... 193
183 */ 194void via_disable_vblank(struct drm_device *dev, int crtc)
195{
196 drm_via_private_t *dev_priv = dev->dev_private;
184 197
185 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 198 VIA_WRITE8(0x83d4, 0x11);
186 (((cur_vblank = atomic_read(&dev->vbl_received)) - 199 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
187 *sequence) <= (1 << 23)));
188 200
189 *sequence = cur_vblank; 201 if (crtc != 0)
190 return ret; 202 DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
191} 203}
192 204
193static int 205static int
@@ -239,6 +251,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
239 return ret; 251 return ret;
240} 252}
241 253
254
242/* 255/*
243 * drm_dma.h hooks 256 * drm_dma.h hooks
244 */ 257 */
@@ -292,23 +305,25 @@ void via_driver_irq_preinstall(struct drm_device * dev)
292 } 305 }
293} 306}
294 307
295void via_driver_irq_postinstall(struct drm_device * dev) 308int via_driver_irq_postinstall(struct drm_device *dev)
296{ 309{
297 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 310 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
298 u32 status; 311 u32 status;
299 312
300 DRM_DEBUG("\n"); 313 DRM_DEBUG("via_driver_irq_postinstall\n");
301 if (dev_priv) { 314 if (!dev_priv)
302 status = VIA_READ(VIA_REG_INTERRUPT); 315 return -EINVAL;
303 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
304 | dev_priv->irq_enable_mask);
305 316
306 /* Some magic, oh for some data sheets ! */ 317 drm_vblank_init(dev, 1);
318 status = VIA_READ(VIA_REG_INTERRUPT);
319 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
320 | dev_priv->irq_enable_mask);
307 321
308 VIA_WRITE8(0x83d4, 0x11); 322 /* Some magic, oh for some data sheets ! */
309 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); 323 VIA_WRITE8(0x83d4, 0x11);
324 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
310 325
311 } 326 return 0;
312} 327}
313 328
314void via_driver_irq_uninstall(struct drm_device * dev) 329void via_driver_irq_uninstall(struct drm_device * dev)
@@ -339,9 +354,6 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
339 drm_via_irq_t *cur_irq = dev_priv->via_irqs; 354 drm_via_irq_t *cur_irq = dev_priv->via_irqs;
340 int force_sequence; 355 int force_sequence;
341 356
342 if (!dev->irq)
343 return -EINVAL;
344
345 if (irqwait->request.irq >= dev_priv->num_irqs) { 357 if (irqwait->request.irq >= dev_priv->num_irqs) {
346 DRM_ERROR("Trying to wait on unknown irq %d\n", 358 DRM_ERROR("Trying to wait on unknown irq %d\n",
347 irqwait->request.irq); 359 irqwait->request.irq);
@@ -352,7 +364,8 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
352 364
353 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { 365 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
354 case VIA_IRQ_RELATIVE: 366 case VIA_IRQ_RELATIVE:
355 irqwait->request.sequence += atomic_read(&cur_irq->irq_received); 367 irqwait->request.sequence +=
368 atomic_read(&cur_irq->irq_received);
356 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; 369 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
357 case VIA_IRQ_ABSOLUTE: 370 case VIA_IRQ_ABSOLUTE:
358 break; 371 break;
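
Note: the unexplained 0x83d4/0x83d5 pokes ("Some magic, oh for some data sheets !") are the standard VGA CRTC index/data port pair, and the patch uses bits 4-5 of CRTC register 0x11 as the vblank interrupt gate. An annotated reading, with the bit semantics inferred from how via_enable_vblank()/via_disable_vblank() flip them rather than from VIA documentation:

	VIA_WRITE8(0x83d4, 0x11);			/* index port: select CRTC register 0x11 */
	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);	/* data port: set bits 4-5, vblank irq on */

	VIA_WRITE8(0x83d4, 0x11);			/* same register again... */
	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);	/* ...clear bits 4-5, vblank irq off */
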
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index e64094916e4f..f694cb5ededc 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -93,8 +93,7 @@ int via_final_context(struct drm_device *dev, int context)
93 /* Last context, perform cleanup */ 93 /* Last context, perform cleanup */
94 if (dev->ctx_count == 1 && dev->dev_private) { 94 if (dev->ctx_count == 1 && dev->dev_private) {
95 DRM_DEBUG("Last Context\n"); 95 DRM_DEBUG("Last Context\n");
96 if (dev->irq) 96 drm_irq_uninstall(dev);
97 drm_irq_uninstall(dev);
98 via_cleanup_futex(dev_priv); 97 via_cleanup_futex(dev_priv);
99 via_do_cleanup_map(dev); 98 via_do_cleanup_map(dev);
100 } 99 }
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 38d3c6b8276a..f46ba4b57da4 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -36,7 +36,6 @@
36#ifndef _DRM_H_ 36#ifndef _DRM_H_
37#define _DRM_H_ 37#define _DRM_H_
38 38
39#if defined(__linux__)
40#if defined(__KERNEL__) 39#if defined(__KERNEL__)
41#endif 40#endif
42#include <asm/ioctl.h> /* For _IO* macros */ 41#include <asm/ioctl.h> /* For _IO* macros */
@@ -46,22 +45,6 @@
46#define DRM_IOC_WRITE _IOC_WRITE 45#define DRM_IOC_WRITE _IOC_WRITE
47#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE 46#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
48#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) 47#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
49#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
50#if defined(__FreeBSD__) && defined(IN_MODULE)
51/* Prevent name collision when including sys/ioccom.h */
52#undef ioctl
53#include <sys/ioccom.h>
54#define ioctl(a,b,c) xf86ioctl(a,b,c)
55#else
56#include <sys/ioccom.h>
57#endif /* __FreeBSD__ && xf86ioctl */
58#define DRM_IOCTL_NR(n) ((n) & 0xff)
59#define DRM_IOC_VOID IOC_VOID
60#define DRM_IOC_READ IOC_OUT
61#define DRM_IOC_WRITE IOC_IN
62#define DRM_IOC_READWRITE IOC_INOUT
63#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
64#endif
65 48
66#define DRM_MAJOR 226 49#define DRM_MAJOR 226
67#define DRM_MAX_MINOR 15 50#define DRM_MAX_MINOR 15
@@ -471,6 +454,7 @@ struct drm_irq_busid {
471enum drm_vblank_seq_type { 454enum drm_vblank_seq_type {
472 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ 455 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
473 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ 456 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
457 _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
474 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ 458 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
475 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ 459 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
476 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ 460 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
@@ -503,6 +487,19 @@ union drm_wait_vblank {
503 struct drm_wait_vblank_reply reply; 487 struct drm_wait_vblank_reply reply;
504}; 488};
505 489
490#define _DRM_PRE_MODESET 1
491#define _DRM_POST_MODESET 2
492
493/**
494 * DRM_IOCTL_MODESET_CTL ioctl argument type
495 *
496 * \sa drmModesetCtl().
497 */
498struct drm_modeset_ctl {
499 uint32_t crtc;
500 uint32_t cmd;
501};
502
506/** 503/**
507 * DRM_IOCTL_AGP_ENABLE ioctl argument type. 504 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
508 * 505 *
@@ -573,6 +570,34 @@ struct drm_set_version {
573 int drm_dd_minor; 570 int drm_dd_minor;
574}; 571};
575 572
573/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
574struct drm_gem_close {
575 /** Handle of the object to be closed. */
576 uint32_t handle;
577 uint32_t pad;
578};
579
580/** DRM_IOCTL_GEM_FLINK ioctl argument type */
581struct drm_gem_flink {
582 /** Handle for the object being named */
583 uint32_t handle;
584
585 /** Returned global name */
586 uint32_t name;
587};
588
589/** DRM_IOCTL_GEM_OPEN ioctl argument type */
590struct drm_gem_open {
591 /** Name of object being opened */
592 uint32_t name;
593
594 /** Returned handle for the object */
595 uint32_t handle;
596
597 /** Returned size of the object */
598 uint64_t size;
599};
600
576#define DRM_IOCTL_BASE 'd' 601#define DRM_IOCTL_BASE 'd'
577#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) 602#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
578#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) 603#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
@@ -587,6 +612,10 @@ struct drm_set_version {
587#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) 612#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
588#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) 613#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
589#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) 614#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
615#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
616#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
617#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
618#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
590 619
591#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) 620#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
592#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) 621#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
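
Note: the three GEM ioctls above let one client export a per-fd handle as a global name (flink) and another client turn that name back into its own handle (open); DRM_IOCTL_GEM_CLOSE then drops a handle. A sketch of the intended userspace flow, assuming an already-open DRM fd and a handle obtained from a driver-specific allocator (the helper names are hypothetical):

#include <stdint.h>
#include <sys/ioctl.h>
#include "drm.h"

static int gem_flink(int fd, uint32_t handle, uint32_t *name)
{
	struct drm_gem_flink flink = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink))
		return -1;			/* errno holds the error */
	*name = flink.name;
	return 0;
}

static int gem_open_by_name(int fd, uint32_t name,
			    uint32_t *handle, uint64_t *size)
{
	struct drm_gem_open open_arg = { .name = name };

	if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg))
		return -1;
	*handle = open_arg.handle;		/* handle is local to this fd */
	*size = open_arg.size;
	return 0;
}
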
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 1c1b13e29223..59c796b46ee7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -104,6 +104,7 @@ struct drm_device;
104#define DRIVER_DMA_QUEUE 0x200 104#define DRIVER_DMA_QUEUE 0x200
105#define DRIVER_FB_DMA 0x400 105#define DRIVER_FB_DMA 0x400
106#define DRIVER_IRQ_VBL2 0x800 106#define DRIVER_IRQ_VBL2 0x800
107#define DRIVER_GEM 0x1000
107 108
108/***********************************************************************/ 109/***********************************************************************/
109/** \name Begin the DRM... */ 110/** \name Begin the DRM... */
@@ -387,6 +388,10 @@ struct drm_file {
387 struct drm_minor *minor; 388 struct drm_minor *minor;
388 int remove_auth_on_close; 389 int remove_auth_on_close;
389 unsigned long lock_count; 390 unsigned long lock_count;
391 /** Mapping of mm object handles to object pointers. */
392 struct idr object_idr;
393 /** Lock for synchronization of access to object_idr. */
394 spinlock_t table_lock;
390 struct file *filp; 395 struct file *filp;
391 void *driver_priv; 396 void *driver_priv;
392}; 397};
@@ -558,6 +563,56 @@ struct drm_ati_pcigart_info {
558}; 563};
559 564
560/** 565/**
566 * This structure defines the drm_mm memory object, which will be used by the
567 * DRM for its buffer objects.
568 */
569struct drm_gem_object {
570 /** Reference count of this object */
571 struct kref refcount;
572
573 /** Handle count of this object. Each handle also holds a reference */
574 struct kref handlecount;
575
576 /** Related drm device */
577 struct drm_device *dev;
578
579 /** File representing the shmem storage */
580 struct file *filp;
581
582 /**
583 * Size of the object, in bytes. Immutable over the object's
584 * lifetime.
585 */
586 size_t size;
587
588 /**
589 * Global name for this object, starts at 1. 0 means unnamed.
590 * Access is covered by the object_name_lock in the related drm_device
591 */
592 int name;
593
594 /**
595 * Memory domains. These monitor which caches contain read/write data
596 * related to the object. When transitioning from one set of domains
597 * to another, the driver is called to ensure that caches are suitably
598 * flushed and invalidated
599 */
600 uint32_t read_domains;
601 uint32_t write_domain;
602
603 /**
604 * While validating an exec operation, the
605 * new read/write domain values are computed here.
606 * They will be transferred to the above values
607 * at the point that any cache flushing occurs
608 */
609 uint32_t pending_read_domains;
610 uint32_t pending_write_domain;
611
612 void *driver_private;
613};
614
615/**
 561 * DRM driver structure. This structure represents the common code for 616 * DRM driver structure. This structure represents the common code for
 562 * a family of cards. There will be one drm_device for each card present 617 * a family of cards. There will be one drm_device for each card present
563 * in this family 618 * in this family
@@ -580,11 +635,54 @@ struct drm_driver {
580 int (*kernel_context_switch) (struct drm_device *dev, int old, 635 int (*kernel_context_switch) (struct drm_device *dev, int old,
581 int new); 636 int new);
582 void (*kernel_context_switch_unlock) (struct drm_device *dev); 637 void (*kernel_context_switch_unlock) (struct drm_device *dev);
583 int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
584 int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
585 int (*dri_library_name) (struct drm_device *dev, char *buf); 638 int (*dri_library_name) (struct drm_device *dev, char *buf);
586 639
587 /** 640 /**
641 * get_vblank_counter - get raw hardware vblank counter
642 * @dev: DRM device
643 * @crtc: counter to fetch
644 *
645 * Driver callback for fetching a raw hardware vblank counter
646 * for @crtc. If a device doesn't have a hardware counter, the
647 * driver can simply return the value of drm_vblank_count and
648 * make the enable_vblank() and disable_vblank() hooks into no-ops,
649 * leaving interrupts enabled at all times.
650 *
651 * Wraparound handling and loss of events due to modesetting is dealt
652 * with in the DRM core code.
653 *
654 * RETURNS
655 * Raw vblank counter value.
656 */
657 u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
658
659 /**
660 * enable_vblank - enable vblank interrupt events
661 * @dev: DRM device
662 * @crtc: which irq to enable
663 *
664 * Enable vblank interrupts for @crtc. If the device doesn't have
665 * a hardware vblank counter, this routine should be a no-op, since
666 * interrupts will have to stay on to keep the count accurate.
667 *
668 * RETURNS
669 * Zero on success, appropriate errno if the given @crtc's vblank
670 * interrupt cannot be enabled.
671 */
672 int (*enable_vblank) (struct drm_device *dev, int crtc);
673
674 /**
675 * disable_vblank - disable vblank interrupt events
676 * @dev: DRM device
 677 * @crtc: which irq to disable
678 *
679 * Disable vblank interrupts for @crtc. If the device doesn't have
680 * a hardware vblank counter, this routine should be a no-op, since
681 * interrupts will have to stay on to keep the count accurate.
682 */
683 void (*disable_vblank) (struct drm_device *dev, int crtc);
684
685 /**
588 * Called by \c drm_device_is_agp. Typically used to determine if a 686 * Called by \c drm_device_is_agp. Typically used to determine if a
589 * card is really attached to AGP or not. 687 * card is really attached to AGP or not.
590 * 688 *
@@ -601,7 +699,7 @@ struct drm_driver {
601 699
602 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); 700 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
603 void (*irq_preinstall) (struct drm_device *dev); 701 void (*irq_preinstall) (struct drm_device *dev);
604 void (*irq_postinstall) (struct drm_device *dev); 702 int (*irq_postinstall) (struct drm_device *dev);
605 void (*irq_uninstall) (struct drm_device *dev); 703 void (*irq_uninstall) (struct drm_device *dev);
606 void (*reclaim_buffers) (struct drm_device *dev, 704 void (*reclaim_buffers) (struct drm_device *dev,
607 struct drm_file * file_priv); 705 struct drm_file * file_priv);
@@ -614,6 +712,18 @@ struct drm_driver {
614 void (*set_version) (struct drm_device *dev, 712 void (*set_version) (struct drm_device *dev,
615 struct drm_set_version *sv); 713 struct drm_set_version *sv);
616 714
715 int (*proc_init)(struct drm_minor *minor);
716 void (*proc_cleanup)(struct drm_minor *minor);
717
718 /**
719 * Driver-specific constructor for drm_gem_objects, to set up
720 * obj->driver_private.
721 *
722 * Returns 0 on success.
723 */
724 int (*gem_init_object) (struct drm_gem_object *obj);
725 void (*gem_free_object) (struct drm_gem_object *obj);
726
617 int major; 727 int major;
618 int minor; 728 int minor;
619 int patchlevel; 729 int patchlevel;
@@ -714,7 +824,6 @@ struct drm_device {
714 824
715 /** \name Context support */ 825 /** \name Context support */
716 /*@{ */ 826 /*@{ */
717 int irq; /**< Interrupt used by board */
718 int irq_enabled; /**< True if irq handler is enabled */ 827 int irq_enabled; /**< True if irq handler is enabled */
719 __volatile__ long context_flag; /**< Context swapping flag */ 828 __volatile__ long context_flag; /**< Context swapping flag */
720 __volatile__ long interrupt_flag; /**< Interruption handler flag */ 829 __volatile__ long interrupt_flag; /**< Interruption handler flag */
@@ -730,13 +839,28 @@ struct drm_device {
730 /** \name VBLANK IRQ support */ 839 /** \name VBLANK IRQ support */
731 /*@{ */ 840 /*@{ */
732 841
733 wait_queue_head_t vbl_queue; /**< VBLANK wait queue */ 842 /*
734 atomic_t vbl_received; 843 * At load time, disabling the vblank interrupt won't be allowed since
735 atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */ 844 * old clients may not call the modeset ioctl and therefore misbehave.
845 * Once the modeset ioctl *has* been called though, we can safely
846 * disable them when unused.
847 */
848 int vblank_disable_allowed;
849
850 wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
851 atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
736 spinlock_t vbl_lock; 852 spinlock_t vbl_lock;
737 struct list_head vbl_sigs; /**< signal list to send on VBLANK */ 853 struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
 738 struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */ 854 atomic_t vbl_signal_pending; /* number of signals pending on all crtcs */
 739 unsigned int vbl_pending; 855 atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
856 u32 *last_vblank; /* protected by dev->vbl_lock, used */
857 /* for wraparound handling */
858 int *vblank_enabled; /* so we don't call enable more than
859 once per disable */
860 int *vblank_inmodeset; /* Display driver is setting mode */
861 struct timer_list vblank_disable_timer;
862
863 u32 max_vblank_count; /**< size of vblank counter register */
740 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */ 864 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
741 void (*locked_tasklet_func)(struct drm_device *dev); 865 void (*locked_tasklet_func)(struct drm_device *dev);
742 866
@@ -757,6 +881,7 @@ struct drm_device {
757 struct pci_controller *hose; 881 struct pci_controller *hose;
758#endif 882#endif
759 struct drm_sg_mem *sg; /**< Scatter gather memory */ 883 struct drm_sg_mem *sg; /**< Scatter gather memory */
884 int num_crtcs; /**< Number of CRTCs on this device */
760 void *dev_private; /**< device private data */ 885 void *dev_private; /**< device private data */
761 struct drm_sigdata sigdata; /**< For block_all_signals */ 886 struct drm_sigdata sigdata; /**< For block_all_signals */
762 sigset_t sigmask; 887 sigset_t sigmask;
@@ -771,8 +896,29 @@ struct drm_device {
771 spinlock_t drw_lock; 896 spinlock_t drw_lock;
772 struct idr drw_idr; 897 struct idr drw_idr;
773 /*@} */ 898 /*@} */
899
900 /** \name GEM information */
901 /*@{ */
902 spinlock_t object_name_lock;
903 struct idr object_name_idr;
904 atomic_t object_count;
905 atomic_t object_memory;
906 atomic_t pin_count;
907 atomic_t pin_memory;
908 atomic_t gtt_count;
909 atomic_t gtt_memory;
910 uint32_t gtt_total;
911 uint32_t invalidate_domains; /* domains pending invalidation */
912 uint32_t flush_domains; /* domains pending flush */
913 /*@} */
914
774}; 915};
775 916
917static inline int drm_dev_to_irq(struct drm_device *dev)
918{
919 return dev->pdev->irq;
920}
921
776static __inline__ int drm_core_check_feature(struct drm_device *dev, 922static __inline__ int drm_core_check_feature(struct drm_device *dev,
777 int feature) 923 int feature)
778{ 924{
@@ -867,6 +1013,11 @@ extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
867extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); 1013extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
868extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); 1014extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
869extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); 1015extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
1016extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
1017 struct page **pages,
1018 unsigned long num_pages,
1019 uint32_t gtt_offset,
1020 uint32_t type);
870extern int drm_unbind_agp(DRM_AGP_MEM * handle); 1021extern int drm_unbind_agp(DRM_AGP_MEM * handle);
871 1022
872 /* Misc. IOCTL support (drm_ioctl.h) */ 1023 /* Misc. IOCTL support (drm_ioctl.h) */
@@ -929,6 +1080,9 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
929extern int drm_authmagic(struct drm_device *dev, void *data, 1080extern int drm_authmagic(struct drm_device *dev, void *data,
930 struct drm_file *file_priv); 1081 struct drm_file *file_priv);
931 1082
1083/* Cache management (drm_cache.c) */
1084void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
1085
932 /* Locking IOCTL support (drm_lock.h) */ 1086 /* Locking IOCTL support (drm_lock.h) */
933extern int drm_lock(struct drm_device *dev, void *data, 1087extern int drm_lock(struct drm_device *dev, void *data,
934 struct drm_file *file_priv); 1088 struct drm_file *file_priv);
@@ -985,15 +1139,25 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
985extern int drm_control(struct drm_device *dev, void *data, 1139extern int drm_control(struct drm_device *dev, void *data,
986 struct drm_file *file_priv); 1140 struct drm_file *file_priv);
987extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); 1141extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
1142extern int drm_irq_install(struct drm_device *dev);
988extern int drm_irq_uninstall(struct drm_device *dev); 1143extern int drm_irq_uninstall(struct drm_device *dev);
989extern void drm_driver_irq_preinstall(struct drm_device *dev); 1144extern void drm_driver_irq_preinstall(struct drm_device *dev);
990extern void drm_driver_irq_postinstall(struct drm_device *dev); 1145extern void drm_driver_irq_postinstall(struct drm_device *dev);
991extern void drm_driver_irq_uninstall(struct drm_device *dev); 1146extern void drm_driver_irq_uninstall(struct drm_device *dev);
992 1147
1148extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
993extern int drm_wait_vblank(struct drm_device *dev, void *data, 1149extern int drm_wait_vblank(struct drm_device *dev, void *data,
994 struct drm_file *file_priv); 1150 struct drm_file *filp);
995extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); 1151extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
996extern void drm_vbl_send_signals(struct drm_device *dev); 1152extern void drm_locked_tasklet(struct drm_device *dev,
1153 void(*func)(struct drm_device *));
1154extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
1155extern void drm_handle_vblank(struct drm_device *dev, int crtc);
1156extern int drm_vblank_get(struct drm_device *dev, int crtc);
1157extern void drm_vblank_put(struct drm_device *dev, int crtc);
1158/* Modesetting support */
1159extern int drm_modeset_ctl(struct drm_device *dev, void *data,
1160 struct drm_file *file_priv);
997extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); 1161extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
998 1162
999 /* AGP/GART support (drm_agpsupport.h) */ 1163 /* AGP/GART support (drm_agpsupport.h) */
@@ -1026,6 +1190,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
1026extern int drm_agp_free_memory(DRM_AGP_MEM * handle); 1190extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
1027extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); 1191extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
1028extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); 1192extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
1193extern void drm_agp_chipset_flush(struct drm_device *dev);
1029 1194
1030 /* Stub support (drm_stub.h) */ 1195 /* Stub support (drm_stub.h) */
1031extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, 1196extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
@@ -1088,6 +1253,66 @@ extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
1088extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); 1253extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
1089extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); 1254extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
1090 1255
1256/* Graphics Execution Manager library functions (drm_gem.c) */
1257int drm_gem_init(struct drm_device *dev);
1258void drm_gem_object_free(struct kref *kref);
1259struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1260 size_t size);
1261void drm_gem_object_handle_free(struct kref *kref);
1262
1263static inline void
1264drm_gem_object_reference(struct drm_gem_object *obj)
1265{
1266 kref_get(&obj->refcount);
1267}
1268
1269static inline void
1270drm_gem_object_unreference(struct drm_gem_object *obj)
1271{
1272 if (obj == NULL)
1273 return;
1274
1275 kref_put(&obj->refcount, drm_gem_object_free);
1276}
1277
1278int drm_gem_handle_create(struct drm_file *file_priv,
1279 struct drm_gem_object *obj,
1280 int *handlep);
1281
1282static inline void
1283drm_gem_object_handle_reference(struct drm_gem_object *obj)
1284{
1285 drm_gem_object_reference(obj);
1286 kref_get(&obj->handlecount);
1287}
1288
1289static inline void
1290drm_gem_object_handle_unreference(struct drm_gem_object *obj)
1291{
1292 if (obj == NULL)
1293 return;
1294
1295 /*
1296 * Must bump handle count first as this may be the last
1297 * ref, in which case the object would disappear before we
1298 * checked for a name
1299 */
1300 kref_put(&obj->handlecount, drm_gem_object_handle_free);
1301 drm_gem_object_unreference(obj);
1302}
1303
1304struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
1305 struct drm_file *filp,
1306 int handle);
1307int drm_gem_close_ioctl(struct drm_device *dev, void *data,
1308 struct drm_file *file_priv);
1309int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1310 struct drm_file *file_priv);
1311int drm_gem_open_ioctl(struct drm_device *dev, void *data,
1312 struct drm_file *file_priv);
1313void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
1314void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
1315
1091extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); 1316extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
1092extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev); 1317extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
1093extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); 1318extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
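
The lookup and reference helpers above give drivers one pattern for every GEM ioctl: resolve the userspace handle to an object (which takes a reference), work on it, and drop the reference. A hedged sketch of a driver ioctl body using only the functions declared in this hunk; the argument struct is hypothetical:

/* Sketch: example_args is a hypothetical ioctl payload carrying a GEM
 * handle; everything else comes from the declarations above.
 */
static int example_gem_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct example_args { uint32_t handle; } *args = data;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;  /* stale handle, or one owned by another fd */

        /* ... operate on the object here ... */

        drm_gem_object_unreference(obj);        /* drop the lookup reference */
        return 0;
}
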
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 135bd19499fc..da04109741e8 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -84,18 +84,18 @@
84 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 84 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
85 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 85 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
86 {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 86 {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
87 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 87 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
88 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 88 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
89 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 89 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
90 {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 90 {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
91 {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 91 {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
92 {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 92 {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
93 {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 93 {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
94 {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 94 {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
95 {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 95 {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
96 {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 96 {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
97 {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 97 {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
98 {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 98 {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
99 {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 99 {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
100 {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 100 {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
101 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 101 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -113,8 +113,10 @@
113 {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 113 {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
114 {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 114 {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
115 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 115 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
116 {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ 116 {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
117 {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ 117 {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
118 {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
119 {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
118 {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 120 {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
119 {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 121 {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
120 {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 122 {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
@@ -122,16 +124,16 @@
122 {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 124 {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
123 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ 125 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
124 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ 126 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
125 {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 127 {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
126 {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 128 {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
127 {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 129 {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
128 {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 130 {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
129 {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 131 {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
130 {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 132 {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
131 {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 133 {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
132 {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 134 {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
133 {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 135 {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
134 {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 136 {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
135 {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 137 {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
136 {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 138 {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
137 {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 139 {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
@@ -237,6 +239,10 @@
237 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 239 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
238 {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 240 {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
239 {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 241 {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
242 {0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
243 {0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
244 {0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
245 {0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
240 {0, 0, 0} 246 {0, 0, 0}
241 247
242#define r128_PCI_IDS \ 248#define r128_PCI_IDS \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 05c66cf03a9e..eb4b35031a55 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -143,6 +143,22 @@ typedef struct _drm_i915_sarea {
143#define DRM_I915_GET_VBLANK_PIPE 0x0e 143#define DRM_I915_GET_VBLANK_PIPE 0x0e
144#define DRM_I915_VBLANK_SWAP 0x0f 144#define DRM_I915_VBLANK_SWAP 0x0f
145#define DRM_I915_HWS_ADDR 0x11 145#define DRM_I915_HWS_ADDR 0x11
146#define DRM_I915_GEM_INIT 0x13
147#define DRM_I915_GEM_EXECBUFFER 0x14
148#define DRM_I915_GEM_PIN 0x15
149#define DRM_I915_GEM_UNPIN 0x16
150#define DRM_I915_GEM_BUSY 0x17
151#define DRM_I915_GEM_THROTTLE 0x18
152#define DRM_I915_GEM_ENTERVT 0x19
153#define DRM_I915_GEM_LEAVEVT 0x1a
154#define DRM_I915_GEM_CREATE 0x1b
155#define DRM_I915_GEM_PREAD 0x1c
156#define DRM_I915_GEM_PWRITE 0x1d
157#define DRM_I915_GEM_MMAP 0x1e
158#define DRM_I915_GEM_SET_DOMAIN 0x1f
159#define DRM_I915_GEM_SW_FINISH 0x20
160#define DRM_I915_GEM_SET_TILING 0x21
161#define DRM_I915_GEM_GET_TILING 0x22
146 162
147#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 163#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
148#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 164#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -160,6 +176,20 @@ typedef struct _drm_i915_sarea {
160#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 176#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
161#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 177#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
162#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 178#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
179#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
180#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
181#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
182#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
183#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
184#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
185#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
186#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
187#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
188#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
189#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
190#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
191#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
192#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
163 193
164/* Allow drivers to submit batchbuffers directly to hardware, relying 194/* Allow drivers to submit batchbuffers directly to hardware, relying
165 * on the security mechanisms provided by hardware. 195 * on the security mechanisms provided by hardware.
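
Each macro above encodes the direction and payload size for one GEM request, so userspace drives the interface with plain ioctl(2) calls. A minimal userspace sketch creating a buffer object; drm_fd is assumed to be an already-open i915 DRM file descriptor, and error handling is abbreviated:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

uint32_t example_gem_create(int drm_fd, uint64_t size)
{
        struct drm_i915_gem_create create;

        memset(&create, 0, sizeof(create));
        create.size = size;             /* kernel rounds this up to pages */

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
                return 0;               /* object handles are nonzero */

        return create.handle;
}
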
@@ -200,6 +230,8 @@ typedef struct drm_i915_irq_wait {
200#define I915_PARAM_IRQ_ACTIVE 1 230#define I915_PARAM_IRQ_ACTIVE 1
201#define I915_PARAM_ALLOW_BATCHBUFFER 2 231#define I915_PARAM_ALLOW_BATCHBUFFER 2
202#define I915_PARAM_LAST_DISPATCH 3 232#define I915_PARAM_LAST_DISPATCH 3
233#define I915_PARAM_CHIPSET_ID 4
234#define I915_PARAM_HAS_GEM 5
203 235
204typedef struct drm_i915_getparam { 236typedef struct drm_i915_getparam {
205 int param; 237 int param;
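
I915_PARAM_HAS_GEM gives userspace a way to probe for GEM before touching any of the new ioctls. A sketch of the probe, assuming the long-standing DRM_IOCTL_I915_GETPARAM request and the getparam struct's int *value field from the unchanged part of this header:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int example_has_gem(int drm_fd)
{
        drm_i915_getparam_t gp;
        int value = 0;

        gp.param = I915_PARAM_HAS_GEM;
        gp.value = &value;      /* kernel writes the answer here */

        if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                return 0;       /* older kernel: parameter unknown, no GEM */

        return value;
}
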
@@ -267,4 +299,305 @@ typedef struct drm_i915_hws_addr {
267 uint64_t addr; 299 uint64_t addr;
268} drm_i915_hws_addr_t; 300} drm_i915_hws_addr_t;
269 301
302struct drm_i915_gem_init {
303 /**
304 * Beginning offset in the GTT to be managed by the DRM memory
305 * manager.
306 */
307 uint64_t gtt_start;
308 /**
309 * Ending offset in the GTT to be managed by the DRM memory
310 * manager.
311 */
312 uint64_t gtt_end;
313};
314
315struct drm_i915_gem_create {
316 /**
317 * Requested size for the object.
318 *
319 * The (page-aligned) allocated size for the object will be returned.
320 */
321 uint64_t size;
322 /**
323 * Returned handle for the object.
324 *
325 * Object handles are nonzero.
326 */
327 uint32_t handle;
328 uint32_t pad;
329};
330
331struct drm_i915_gem_pread {
332 /** Handle for the object being read. */
333 uint32_t handle;
334 uint32_t pad;
335 /** Offset into the object to read from */
336 uint64_t offset;
337 /** Length of data to read */
338 uint64_t size;
339 /**
340 * Pointer to write the data into.
341 *
342 * This is a fixed-size type for 32/64 compatibility.
343 */
344 uint64_t data_ptr;
345};
346
347struct drm_i915_gem_pwrite {
348 /** Handle for the object being written to. */
349 uint32_t handle;
350 uint32_t pad;
351 /** Offset into the object to write to */
352 uint64_t offset;
353 /** Length of data to write */
354 uint64_t size;
355 /**
356 * Pointer to read the data from.
357 *
358 * This is a fixed-size type for 32/64 compatibility.
359 */
360 uint64_t data_ptr;
361};
362
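
pread and pwrite move data between user memory and an object without mapping it; data_ptr carries the user pointer in a fixed 64-bit field so the struct layout matches for 32-bit and 64-bit userspace. A sketch of uploading data with pwrite (handle obtained as in the create sketch above):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int example_gem_pwrite(int drm_fd, uint32_t handle,
                       const void *data, uint64_t len)
{
        struct drm_i915_gem_pwrite pwrite;

        memset(&pwrite, 0, sizeof(pwrite));
        pwrite.handle = handle;
        pwrite.offset = 0;                      /* write from the start */
        pwrite.size = len;
        pwrite.data_ptr = (uintptr_t)data;      /* user pointer as u64 */

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}
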
363struct drm_i915_gem_mmap {
364 /** Handle for the object being mapped. */
365 uint32_t handle;
366 uint32_t pad;
367 /** Offset in the object to map. */
368 uint64_t offset;
369 /**
370 * Length of data to map.
371 *
372 * The value will be page-aligned.
373 */
374 uint64_t size;
375 /**
 376	 * Returned pointer to where the data was mapped.
377 *
378 * This is a fixed-size type for 32/64 compatibility.
379 */
380 uint64_t addr_ptr;
381};
382
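
This ioctl maps the object's backing pages and hands the address back in addr_ptr, again as a fixed 64-bit field. A sketch of mapping a whole object:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

void *example_gem_mmap(int drm_fd, uint32_t handle, uint64_t size)
{
        struct drm_i915_gem_mmap arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.offset = 0;
        arg.size = size;        /* will be page-aligned by the kernel */

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg) != 0)
                return NULL;

        return (void *)(uintptr_t)arg.addr_ptr;
}
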
383struct drm_i915_gem_set_domain {
384 /** Handle for the object */
385 uint32_t handle;
386
387 /** New read domains */
388 uint32_t read_domains;
389
390 /** New write domain */
391 uint32_t write_domain;
392};
393
394struct drm_i915_gem_sw_finish {
395 /** Handle for the object */
396 uint32_t handle;
397};
398
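
set_domain pulls an object into the domains the CPU is about to use, flushing or invalidating caches as needed, and sw_finish tells the kernel that CPU writes through a mapping are done. A sketch bracketing a CPU write through the mmap above (the I915_GEM_DOMAIN_* flags are defined just below):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int example_begin_cpu_write(int drm_fd, uint32_t handle)
{
        struct drm_i915_gem_set_domain sd;

        memset(&sd, 0, sizeof(sd));
        sd.handle = handle;
        sd.read_domains = I915_GEM_DOMAIN_CPU;
        sd.write_domain = I915_GEM_DOMAIN_CPU; /* about to write via mmap */

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}

int example_end_cpu_write(int drm_fd, uint32_t handle)
{
        struct drm_i915_gem_sw_finish sf;

        memset(&sf, 0, sizeof(sf));
        sf.handle = handle;     /* CPU writes to this object are complete */

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SW_FINISH, &sf);
}
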
399struct drm_i915_gem_relocation_entry {
400 /**
401 * Handle of the buffer being pointed to by this relocation entry.
402 *
 403	 * It's appealing to make this an index into the validate-entry
 404	 * list instead, but a handle lets the driver create a relocation
 405	 * list once for state buffers and avoid rewriting it on every
 406	 * exec that uses the buffer.
407 */
408 uint32_t target_handle;
409
410 /**
411 * Value to be added to the offset of the target buffer to make up
412 * the relocation entry.
413 */
414 uint32_t delta;
415
416 /** Offset in the buffer the relocation entry will be written into */
417 uint64_t offset;
418
419 /**
420 * Offset value of the target buffer that the relocation entry was last
421 * written as.
422 *
423 * If the buffer has the same offset as last time, we can skip syncing
424 * and writing the relocation. This value is written back out by
425 * the execbuffer ioctl when the relocation is written.
426 */
427 uint64_t presumed_offset;
428
429 /**
430 * Target memory domains read by this operation.
431 */
432 uint32_t read_domains;
433
434 /**
435 * Target memory domains written by this operation.
436 *
437 * Note that only one domain may be written by the whole
438 * execbuffer operation, so that where there are conflicts,
439 * the application will get -EINVAL back.
440 */
441 uint32_t write_domain;
442};
443
444/** @{
445 * Intel memory domains
446 *
447 * Most of these just align with the various caches in
448 * the system and are used to flush and invalidate as
449 * objects end up cached in different domains.
450 */
451/** CPU cache */
452#define I915_GEM_DOMAIN_CPU 0x00000001
453/** Render cache, used by 2D and 3D drawing */
454#define I915_GEM_DOMAIN_RENDER 0x00000002
455/** Sampler cache, used by texture engine */
456#define I915_GEM_DOMAIN_SAMPLER 0x00000004
457/** Command queue, used to load batch buffers */
458#define I915_GEM_DOMAIN_COMMAND 0x00000008
459/** Instruction cache, used by shader programs */
460#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
461/** Vertex address cache */
462#define I915_GEM_DOMAIN_VERTEX 0x00000020
463/** GTT domain - aperture and scanout */
464#define I915_GEM_DOMAIN_GTT 0x00000040
465/** @} */
466
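
A relocation entry names a spot inside one buffer where the kernel must patch in the final GTT address of another, and the domain flags above describe which caches the access goes through. A sketch of filling one entry for a batchbuffer command that renders to a target buffer; the patch offset is illustrative:

#include <stdint.h>
#include <string.h>
#include <drm/i915_drm.h>

void example_fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
                        uint32_t target_handle, uint64_t patch_offset)
{
        memset(reloc, 0, sizeof(*reloc));
        reloc->target_handle = target_handle;  /* buffer whose address we need */
        reloc->delta = 0;                      /* byte offset added to that address */
        reloc->offset = patch_offset;          /* where in this buffer to patch */
        reloc->presumed_offset = 0;            /* no guess yet; kernel writes it back */
        reloc->read_domains = I915_GEM_DOMAIN_RENDER;
        reloc->write_domain = I915_GEM_DOMAIN_RENDER;
}
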
467struct drm_i915_gem_exec_object {
468 /**
469 * User's handle for a buffer to be bound into the GTT for this
470 * operation.
471 */
472 uint32_t handle;
473
474 /** Number of relocations to be performed on this buffer */
475 uint32_t relocation_count;
476 /**
477 * Pointer to array of struct drm_i915_gem_relocation_entry containing
478 * the relocations to be performed in this buffer.
479 */
480 uint64_t relocs_ptr;
481
482 /** Required alignment in graphics aperture */
483 uint64_t alignment;
484
485 /**
486 * Returned value of the updated offset of the object, for future
487 * presumed_offset writes.
488 */
489 uint64_t offset;
490};
491
492struct drm_i915_gem_execbuffer {
493 /**
 494	 * List of buffers to be validated, with their relocations to be
 495	 * performed on them.
496 *
 497	 * This is a pointer to an array of struct drm_i915_gem_exec_object.
498 *
499 * These buffers must be listed in an order such that all relocations
500 * a buffer is performing refer to buffers that have already appeared
501 * in the validate list.
502 */
503 uint64_t buffers_ptr;
504 uint32_t buffer_count;
505
506 /** Offset in the batchbuffer to start execution from. */
507 uint32_t batch_start_offset;
508 /** Bytes used in batchbuffer from batch_start_offset */
509 uint32_t batch_len;
510 uint32_t DR1;
511 uint32_t DR4;
512 uint32_t num_cliprects;
513 /** This is a struct drm_clip_rect *cliprects */
514 uint64_t cliprects_ptr;
515};
516
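
Execution ties it together: an array of exec objects (batchbuffer last, per the ordering rule above), each carrying its relocation list. Note that this header does not yet define a DRM_IOCTL_I915_GEM_EXECBUFFER macro, so the encoding below is an assumption built the same way as the other GEM ioctls:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Assumed encoding; not defined in this header. */
#define EXAMPLE_IOCTL_I915_GEM_EXECBUFFER \
        DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, \
                struct drm_i915_gem_execbuffer)

int example_exec(int drm_fd, struct drm_i915_gem_exec_object *objs,
                 uint32_t count, uint32_t batch_len)
{
        struct drm_i915_gem_execbuffer eb;

        memset(&eb, 0, sizeof(eb));
        eb.buffers_ptr = (uintptr_t)objs;      /* batch must be the last entry */
        eb.buffer_count = count;
        eb.batch_start_offset = 0;
        eb.batch_len = batch_len;
        eb.num_cliprects = 0;                  /* no cliprects in this sketch */

        return ioctl(drm_fd, EXAMPLE_IOCTL_I915_GEM_EXECBUFFER, &eb);
}
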
517struct drm_i915_gem_pin {
518 /** Handle of the buffer to be pinned. */
519 uint32_t handle;
520 uint32_t pad;
521
522 /** alignment required within the aperture */
523 uint64_t alignment;
524
525 /** Returned GTT offset of the buffer. */
526 uint64_t offset;
527};
528
529struct drm_i915_gem_unpin {
530 /** Handle of the buffer to be unpinned. */
531 uint32_t handle;
532 uint32_t pad;
533};
534
535struct drm_i915_gem_busy {
536 /** Handle of the buffer to check for busy */
537 uint32_t handle;
538
539 /** Return busy status (1 if busy, 0 if idle) */
540 uint32_t busy;
541};
542
543#define I915_TILING_NONE 0
544#define I915_TILING_X 1
545#define I915_TILING_Y 2
546
547#define I915_BIT_6_SWIZZLE_NONE 0
548#define I915_BIT_6_SWIZZLE_9 1
549#define I915_BIT_6_SWIZZLE_9_10 2
550#define I915_BIT_6_SWIZZLE_9_11 3
551#define I915_BIT_6_SWIZZLE_9_10_11 4
552/* Not seen by userland */
553#define I915_BIT_6_SWIZZLE_UNKNOWN 5
554
555struct drm_i915_gem_set_tiling {
556 /** Handle of the buffer to have its tiling state updated */
557 uint32_t handle;
558
559 /**
560 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
561 * I915_TILING_Y).
562 *
563 * This value is to be set on request, and will be updated by the
564 * kernel on successful return with the actual chosen tiling layout.
565 *
566 * The tiling mode may be demoted to I915_TILING_NONE when the system
567 * has bit 6 swizzling that can't be managed correctly by GEM.
568 *
569 * Buffer contents become undefined when changing tiling_mode.
570 */
571 uint32_t tiling_mode;
572
573 /**
574 * Stride in bytes for the object when in I915_TILING_X or
575 * I915_TILING_Y.
576 */
577 uint32_t stride;
578
579 /**
580 * Returned address bit 6 swizzling required for CPU access through
581 * mmap mapping.
582 */
583 uint32_t swizzle_mode;
584};
585
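
set_tiling is a negotiation: userspace asks for a mode and stride, and the kernel may demote the mode and reports the bit-6 swizzle that CPU access through a mapping must apply. A sketch requesting X tiling; the caller must re-read tiling_mode and swizzle_mode on return:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int example_set_x_tiling(int drm_fd, uint32_t handle, uint32_t stride)
{
        struct drm_i915_gem_set_tiling st;

        memset(&st, 0, sizeof(st));
        st.handle = handle;
        st.tiling_mode = I915_TILING_X; /* a request, not a guarantee */
        st.stride = stride;             /* bytes per tiled row */

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_TILING, &st) != 0)
                return -1;

        /* st.tiling_mode and st.swizzle_mode now hold the kernel's choice. */
        return (int)st.swizzle_mode;
}
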
586struct drm_i915_gem_get_tiling {
587 /** Handle of the buffer to get tiling state for. */
588 uint32_t handle;
589
590 /**
591 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
592 * I915_TILING_Y).
593 */
594 uint32_t tiling_mode;
595
596 /**
597 * Returned address bit 6 swizzling required for CPU access through
598 * mmap mapping.
599 */
600 uint32_t swizzle_mode;
601};
602
270#endif /* _I915_DRM_H_ */ 603#endif /* _I915_DRM_H_ */
diff --git a/mm/shmem.c b/mm/shmem.c
index bf66d0191baf..d87958a5f03e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2580,6 +2580,7 @@ put_memory:
2580 shmem_unacct_size(flags, size); 2580 shmem_unacct_size(flags, size);
2581 return ERR_PTR(error); 2581 return ERR_PTR(error);
2582} 2582}
2583EXPORT_SYMBOL_GPL(shmem_file_setup);
2583 2584
2584/** 2585/**
2585 * shmem_zero_setup - setup a shared anonymous mapping 2586 * shmem_zero_setup - setup a shared anonymous mapping
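
Exporting shmem_file_setup() is what allows GEM, which can be built as a module, to back each object with ordinary swappable shmem pages instead of pinned kernel memory. A kernel-side sketch of how an allocator such as drm_gem_object_alloc() (declared in the drmP.h hunk above) might use it; the object name string is illustrative:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>   /* shmem_file_setup() declaration (assumed location) */

static struct file *example_gem_backing(loff_t size)
{
        /* Returns ERR_PTR() on failure; callers check with IS_ERR(). */
        return shmem_file_setup("drm mm object", size, 0);
}
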