-rw-r--r--  drivers/gpu/drm/Makefile                |    5
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c        |   51
-rw-r--r--  drivers/gpu/drm/drm_cache.c             |   76
-rw-r--r--  drivers/gpu/drm/drm_drv.c               |    4
-rw-r--r--  drivers/gpu/drm/drm_fops.c              |    6
-rw-r--r--  drivers/gpu/drm/drm_gem.c               |  420
-rw-r--r--  drivers/gpu/drm/drm_memory.c            |    2
-rw-r--r--  drivers/gpu/drm/drm_mm.c                |    5
-rw-r--r--  drivers/gpu/drm/drm_proc.c              |  135
-rw-r--r--  drivers/gpu/drm/drm_stub.c              |   10
-rw-r--r--  drivers/gpu/drm/i915/Makefile           |    6
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         |   94
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |   11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |  253
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 2497
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c   |  201
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c    |  292
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |  256
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c         |   19
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h         |   37
-rw-r--r--  include/drm/drm.h                       |   31
-rw-r--r--  include/drm/drmP.h                      |  151
-rw-r--r--  include/drm/i915_drm.h                  |  332
23 files changed, 4832 insertions, 62 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e9f9a97ae00a..74da99495e21 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,8 +4,9 @@
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6 6
7drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ 7drm-y := drm_auth.o drm_bufs.o drm_cache.o \
8 drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \ 8 drm_context.o drm_dma.o drm_drawable.o \
9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
9 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
10 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
11 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o 12 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index aefa5ac4c0b1..2639be2db9e5 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,6 +33,7 @@
33 33
34#include "drmP.h" 34#include "drmP.h"
35#include <linux/module.h> 35#include <linux/module.h>
36#include <asm/agp.h>
36 37
37#if __OS_HAS_AGP 38#if __OS_HAS_AGP
38 39
@@ -452,4 +453,52 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
452 return agp_unbind_memory(handle); 453 return agp_unbind_memory(handle);
453} 454}
454 455
455#endif /* __OS_HAS_AGP */ 456/**
457 * Binds a collection of pages into AGP memory at the given offset, returning
458 * the AGP memory structure containing them.
459 *
460 * No reference is held on the pages during this time -- it is up to the
461 * caller to handle that.
462 */
463DRM_AGP_MEM *
464drm_agp_bind_pages(struct drm_device *dev,
465 struct page **pages,
466 unsigned long num_pages,
467 uint32_t gtt_offset)
468{
469 DRM_AGP_MEM *mem;
470 int ret, i;
471
472 DRM_DEBUG("\n");
473
474 mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
475 AGP_USER_MEMORY);
476 if (mem == NULL) {
477 DRM_ERROR("Failed to allocate memory for %ld pages\n",
478 num_pages);
479 return NULL;
480 }
481
482 for (i = 0; i < num_pages; i++)
483 mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
484 mem->page_count = num_pages;
485
486 mem->is_flushed = true;
487 ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
488 if (ret != 0) {
489 DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
490 agp_free_memory(mem);
491 return NULL;
492 }
493
494 return mem;
495}
496EXPORT_SYMBOL(drm_agp_bind_pages);
497
498void drm_agp_chipset_flush(struct drm_device *dev)
499{
500 agp_flush_chipset(dev->agp->bridge);
501}
502EXPORT_SYMBOL(drm_agp_chipset_flush);
503
504#endif /* __OS_HAS_AGP */
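The drm_agp_bind_pages() and drm_agp_chipset_flush() helpers added above are what let GEM insert an object's shmfs-backed pages into the GTT without going through the legacy AGP buffer paths. A minimal caller sketch, assuming the caller already holds references on the pages and keeps the returned DRM_AGP_MEM around for a later unbind; the function name and arguments here are illustrative, not part of this patch:

static int example_bind_to_gtt(struct drm_device *dev,
                               struct page **pages,
                               unsigned long num_pages,
                               uint32_t gtt_offset,
                               DRM_AGP_MEM **agp_mem_out)
{
        DRM_AGP_MEM *mem;

        /* Bind the pages at the chosen aperture offset. */
        mem = drm_agp_bind_pages(dev, pages, num_pages, gtt_offset);
        if (mem == NULL)
                return -ENOMEM;

        /* Make sure the chipset sees the new GTT entries before use. */
        drm_agp_chipset_flush(dev);

        *agp_mem_out = mem;
        return 0;
}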
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
new file mode 100644
index 000000000000..9475f7d9901d
--- /dev/null
+++ b/drivers/gpu/drm/drm_cache.c
@@ -0,0 +1,76 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29 */
30
31#include "drmP.h"
32
33#if defined(CONFIG_X86)
34static void
35drm_clflush_page(struct page *page)
36{
37 uint8_t *page_virtual;
38 unsigned int i;
39
40 if (unlikely(page == NULL))
41 return;
42
43 page_virtual = kmap_atomic(page, KM_USER0);
44 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
45 clflush(page_virtual + i);
46 kunmap_atomic(page_virtual, KM_USER0);
47}
48#endif
49
50static void
51drm_clflush_ipi_handler(void *null)
52{
53 wbinvd();
54}
55
56void
57drm_clflush_pages(struct page *pages[], unsigned long num_pages)
58{
59
60#if defined(CONFIG_X86)
61 if (cpu_has_clflush) {
62 unsigned long i;
63
64 mb();
65 for (i = 0; i < num_pages; ++i)
66 drm_clflush_page(*pages++);
67 mb();
68
69 return;
70 }
71#endif
72
73 if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
74 DRM_ERROR("Timed out waiting for cache flush.\n");
75}
76EXPORT_SYMBOL(drm_clflush_pages);
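drm_clflush_pages() gives drivers a portable way to push dirty CPU cache lines for a set of pages out to memory before the GPU reads them: per-cacheline clflush when the CPU supports it, otherwise a wbinvd IPI on every CPU. A short sketch of the intended call pattern, assuming the caller kept the struct page pointers from when the object was bound; the names here are illustrative:

static void example_flush_before_gpu_read(struct page **page_list,
                                          size_t obj_size)
{
        if (page_list == NULL)
                return;

        /* obj_size is page-aligned, so this covers every backing page. */
        drm_clflush_pages(page_list, obj_size / PAGE_SIZE);
}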
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index fb45fe7aeb8c..96f416afc3f6 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -119,6 +119,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
119 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), 119 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
120 120
121 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 121 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
122
123 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
124 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
125 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
122}; 126};
123 127
124#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 128#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index dcf8b4dc9549..0d46627663b1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -256,6 +256,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
256 256
257 INIT_LIST_HEAD(&priv->lhead); 257 INIT_LIST_HEAD(&priv->lhead);
258 258
259 if (dev->driver->driver_features & DRIVER_GEM)
260 drm_gem_open(dev, priv);
261
259 if (dev->driver->open) { 262 if (dev->driver->open) {
260 ret = dev->driver->open(dev, priv); 263 ret = dev->driver->open(dev, priv);
261 if (ret < 0) 264 if (ret < 0)
@@ -400,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp)
400 dev->driver->reclaim_buffers(dev, file_priv); 403 dev->driver->reclaim_buffers(dev, file_priv);
401 } 404 }
402 405
406 if (dev->driver->driver_features & DRIVER_GEM)
407 drm_gem_release(dev, file_priv);
408
403 drm_fasync(-1, filp, 0); 409 drm_fasync(-1, filp, 0);
404 410
405 mutex_lock(&dev->ctxlist_mutex); 411 mutex_lock(&dev->ctxlist_mutex);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
new file mode 100644
index 000000000000..434155b387e9
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem.c
@@ -0,0 +1,420 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <linux/types.h>
29#include <linux/slab.h>
30#include <linux/mm.h>
31#include <linux/uaccess.h>
32#include <linux/fs.h>
33#include <linux/file.h>
34#include <linux/module.h>
35#include <linux/mman.h>
36#include <linux/pagemap.h>
37#include "drmP.h"
38
39/** @file drm_gem.c
40 *
41 * This file provides some of the base ioctls and library routines for
42 * the graphics memory manager implemented by each device driver.
43 *
44 * Because various devices have different requirements in terms of
45 * synchronization and migration strategies, implementing that is left up to
46 * the driver, and all that the general API provides should be generic --
47 * allocating objects, reading/writing data with the cpu, freeing objects.
48 * Even there, platform-dependent optimizations for reading/writing data with
49 * the CPU mean we'll likely hook those out to driver-specific calls. However,
50 * the DRI2 implementation wants to have at least allocate/mmap be generic.
51 *
52 * The goal was to have swap-backed object allocation managed through
53 * struct file. However, file descriptors as handles to a struct file have
54 * two major failings:
55 * - Process limits prevent more than 1024 or so being used at a time by
56 * default.
57 * - Inability to allocate high fds will aggravate the X Server's select()
58 * handling, and likely that of many GL client applications as well.
59 *
60 * This led to a plan of using our own integer IDs (called handles, following
61 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
62 * ioctls. The objects themselves will still include the struct file so
63 * that we can transition to fds if the required kernel infrastructure shows
64 * up at a later date, and as our interface with shmfs for memory allocation.
65 */
66
67/**
68 * Initialize the GEM device fields
69 */
70
71int
72drm_gem_init(struct drm_device *dev)
73{
74 spin_lock_init(&dev->object_name_lock);
75 idr_init(&dev->object_name_idr);
76 atomic_set(&dev->object_count, 0);
77 atomic_set(&dev->object_memory, 0);
78 atomic_set(&dev->pin_count, 0);
79 atomic_set(&dev->pin_memory, 0);
80 atomic_set(&dev->gtt_count, 0);
81 atomic_set(&dev->gtt_memory, 0);
82 return 0;
83}
84
85/**
86 * Allocate a GEM object of the specified size with shmfs backing store
87 */
88struct drm_gem_object *
89drm_gem_object_alloc(struct drm_device *dev, size_t size)
90{
91 struct drm_gem_object *obj;
92
93 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
94
95 obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
96
97 obj->dev = dev;
98 obj->filp = shmem_file_setup("drm mm object", size, 0);
99 if (IS_ERR(obj->filp)) {
100 kfree(obj);
101 return NULL;
102 }
103
104 kref_init(&obj->refcount);
105 kref_init(&obj->handlecount);
106 obj->size = size;
107 if (dev->driver->gem_init_object != NULL &&
108 dev->driver->gem_init_object(obj) != 0) {
109 fput(obj->filp);
110 kfree(obj);
111 return NULL;
112 }
113 atomic_inc(&dev->object_count);
114 atomic_add(obj->size, &dev->object_memory);
115 return obj;
116}
117EXPORT_SYMBOL(drm_gem_object_alloc);
118
119/**
120 * Removes the mapping from handle to filp for this object.
121 */
122static int
123drm_gem_handle_delete(struct drm_file *filp, int handle)
124{
125 struct drm_device *dev;
126 struct drm_gem_object *obj;
127
128 /* This is gross. The idr system doesn't let us try a delete and
129 * return an error code. It just spews if you fail at deleting.
130 * So, we have to grab a lock around finding the object and then
131 * doing the delete on it and dropping the refcount, or the user
132 * could race us to double-decrement the refcount and cause a
133 * use-after-free later. Given the frequency of our handle lookups,
134 * we may want to use ida for number allocation and a hash table
135 * for the pointers, anyway.
136 */
137 spin_lock(&filp->table_lock);
138
139 /* Check if we currently have a reference on the object */
140 obj = idr_find(&filp->object_idr, handle);
141 if (obj == NULL) {
142 spin_unlock(&filp->table_lock);
143 return -EINVAL;
144 }
145 dev = obj->dev;
146
147 /* Release reference and decrement refcount. */
148 idr_remove(&filp->object_idr, handle);
149 spin_unlock(&filp->table_lock);
150
151 mutex_lock(&dev->struct_mutex);
152 drm_gem_object_handle_unreference(obj);
153 mutex_unlock(&dev->struct_mutex);
154
155 return 0;
156}
157
158/**
159 * Create a handle for this object. This adds a handle reference
160 * to the object, which includes a regular reference count. Callers
161 * will likely want to dereference the object afterwards.
162 */
163int
164drm_gem_handle_create(struct drm_file *file_priv,
165 struct drm_gem_object *obj,
166 int *handlep)
167{
168 int ret;
169
170 /*
171 * Get the user-visible handle using idr.
172 */
173again:
174 /* ensure there is space available to allocate a handle */
175 if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
176 return -ENOMEM;
177
178 /* do the allocation under our spinlock */
179 spin_lock(&file_priv->table_lock);
180 ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
181 spin_unlock(&file_priv->table_lock);
182 if (ret == -EAGAIN)
183 goto again;
184
185 if (ret != 0)
186 return ret;
187
188 drm_gem_object_handle_reference(obj);
189 return 0;
190}
191EXPORT_SYMBOL(drm_gem_handle_create);
192
193/** Returns a reference to the object named by the handle. */
194struct drm_gem_object *
195drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
196 int handle)
197{
198 struct drm_gem_object *obj;
199
200 spin_lock(&filp->table_lock);
201
202 /* Check if we currently have a reference on the object */
203 obj = idr_find(&filp->object_idr, handle);
204 if (obj == NULL) {
205 spin_unlock(&filp->table_lock);
206 return NULL;
207 }
208
209 drm_gem_object_reference(obj);
210
211 spin_unlock(&filp->table_lock);
212
213 return obj;
214}
215EXPORT_SYMBOL(drm_gem_object_lookup);
216
217/**
218 * Releases the handle to an mm object.
219 */
220int
221drm_gem_close_ioctl(struct drm_device *dev, void *data,
222 struct drm_file *file_priv)
223{
224 struct drm_gem_close *args = data;
225 int ret;
226
227 if (!(dev->driver->driver_features & DRIVER_GEM))
228 return -ENODEV;
229
230 ret = drm_gem_handle_delete(file_priv, args->handle);
231
232 return ret;
233}
234
235/**
236 * Create a global name for an object, returning the name.
237 *
238 * Note that the name does not hold a reference; when the object
239 * is freed, the name goes away.
240 */
241int
242drm_gem_flink_ioctl(struct drm_device *dev, void *data,
243 struct drm_file *file_priv)
244{
245 struct drm_gem_flink *args = data;
246 struct drm_gem_object *obj;
247 int ret;
248
249 if (!(dev->driver->driver_features & DRIVER_GEM))
250 return -ENODEV;
251
252 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
253 if (obj == NULL)
254 return -EINVAL;
255
256again:
257 if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
258 return -ENOMEM;
259
260 spin_lock(&dev->object_name_lock);
261 if (obj->name) {
262 spin_unlock(&dev->object_name_lock);
263 return -EEXIST;
264 }
265 ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
266 &obj->name);
267 spin_unlock(&dev->object_name_lock);
268 if (ret == -EAGAIN)
269 goto again;
270
271 if (ret != 0) {
272 mutex_lock(&dev->struct_mutex);
273 drm_gem_object_unreference(obj);
274 mutex_unlock(&dev->struct_mutex);
275 return ret;
276 }
277
278 /*
279 * Leave the reference from the lookup around as the
280 * name table now holds one
281 */
282 args->name = (uint64_t) obj->name;
283
284 return 0;
285}
286
287/**
288 * Open an object using the global name, returning a handle and the size.
289 *
290 * This handle (of course) holds a reference to the object, so the object
291 * will not go away until the handle is deleted.
292 */
293int
294drm_gem_open_ioctl(struct drm_device *dev, void *data,
295 struct drm_file *file_priv)
296{
297 struct drm_gem_open *args = data;
298 struct drm_gem_object *obj;
299 int ret;
300 int handle;
301
302 if (!(dev->driver->driver_features & DRIVER_GEM))
303 return -ENODEV;
304
305 spin_lock(&dev->object_name_lock);
306 obj = idr_find(&dev->object_name_idr, (int) args->name);
307 if (obj)
308 drm_gem_object_reference(obj);
309 spin_unlock(&dev->object_name_lock);
310 if (!obj)
311 return -ENOENT;
312
313 ret = drm_gem_handle_create(file_priv, obj, &handle);
314 mutex_lock(&dev->struct_mutex);
315 drm_gem_object_unreference(obj);
316 mutex_unlock(&dev->struct_mutex);
317 if (ret)
318 return ret;
319
320 args->handle = handle;
321 args->size = obj->size;
322
323 return 0;
324}
325
326/**
327 * Called at device open time, sets up the structure for handling refcounting
328 * of mm objects.
329 */
330void
331drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
332{
333 idr_init(&file_private->object_idr);
334 spin_lock_init(&file_private->table_lock);
335}
336
337/**
338 * Called at device close to release the file's
339 * handle references on objects.
340 */
341static int
342drm_gem_object_release_handle(int id, void *ptr, void *data)
343{
344 struct drm_gem_object *obj = ptr;
345
346 drm_gem_object_handle_unreference(obj);
347
348 return 0;
349}
350
351/**
352 * Called at close time when the filp is going away.
353 *
354 * Releases any remaining references on objects by this filp.
355 */
356void
357drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
358{
359 mutex_lock(&dev->struct_mutex);
360 idr_for_each(&file_private->object_idr,
361 &drm_gem_object_release_handle, NULL);
362
363 idr_destroy(&file_private->object_idr);
364 mutex_unlock(&dev->struct_mutex);
365}
366
367/**
368 * Called after the last reference to the object has been lost.
369 *
370 * Frees the object
371 */
372void
373drm_gem_object_free(struct kref *kref)
374{
375 struct drm_gem_object *obj = (struct drm_gem_object *) kref;
376 struct drm_device *dev = obj->dev;
377
378 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
379
380 if (dev->driver->gem_free_object != NULL)
381 dev->driver->gem_free_object(obj);
382
383 fput(obj->filp);
384 atomic_dec(&dev->object_count);
385 atomic_sub(obj->size, &dev->object_memory);
386 kfree(obj);
387}
388EXPORT_SYMBOL(drm_gem_object_free);
389
390/**
391 * Called after the last handle to the object has been closed
392 *
393 * Removes any name for the object. Note that this must be
394 * called before drm_gem_object_free or we'll be touching
395 * freed memory
396 */
397void
398drm_gem_object_handle_free(struct kref *kref)
399{
400 struct drm_gem_object *obj = container_of(kref,
401 struct drm_gem_object,
402 handlecount);
403 struct drm_device *dev = obj->dev;
404
405 /* Remove any name for this object */
406 spin_lock(&dev->object_name_lock);
407 if (obj->name) {
408 idr_remove(&dev->object_name_idr, obj->name);
409 spin_unlock(&dev->object_name_lock);
410 /*
411 * The object name held a reference to this object, drop
412 * that now.
413 */
414 drm_gem_object_unreference(obj);
415 } else
416 spin_unlock(&dev->object_name_lock);
417
418}
419EXPORT_SYMBOL(drm_gem_object_handle_free);
420
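The drm_gem.c header comment above explains why objects are referenced through per-file integer handles, with flink/open providing global names for sharing between processes. A hedged userspace sketch of that flow using the three generic ioctls wired up in drm_drv.c; error handling is trimmed, the initial handle is assumed to come from a driver-specific create ioctl such as i915's GEM_CREATE, and both fds are assumed to be open, authenticated DRM fds:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Illustrative only: publish a GEM object from fd_a and open it on fd_b. */
static int share_gem_object(int fd_a, int fd_b, uint32_t handle)
{
        struct drm_gem_flink flink;
        struct drm_gem_open open_arg;
        struct drm_gem_close close_arg;

        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;
        if (ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink))
                return -1;                      /* requires DRM_AUTH */

        /* flink.name is a global name; pass it to the other process. */
        memset(&open_arg, 0, sizeof(open_arg));
        open_arg.name = flink.name;
        if (ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_arg))
                return -1;

        /* open_arg.handle now references the same object; open_arg.size reports its size. */
        memset(&close_arg, 0, sizeof(close_arg));
        close_arg.handle = open_arg.handle;
        return ioctl(fd_b, DRM_IOCTL_GEM_CLOSE, &close_arg);
}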
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 0177012845c6..803bc9e7ce3c 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -133,6 +133,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
133{ 133{
134 return drm_agp_free_memory(handle) ? 0 : -EINVAL; 134 return drm_agp_free_memory(handle) ? 0 : -EINVAL;
135} 135}
136EXPORT_SYMBOL(drm_free_agp);
136 137
137/** Wrapper around agp_bind_memory() */ 138/** Wrapper around agp_bind_memory() */
138int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) 139int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -145,6 +146,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
145{ 146{
146 return drm_agp_unbind_memory(handle); 147 return drm_agp_unbind_memory(handle);
147} 148}
149EXPORT_SYMBOL(drm_unbind_agp);
148 150
149#else /* __OS_HAS_AGP */ 151#else /* __OS_HAS_AGP */
150static inline void *agp_remap(unsigned long offset, unsigned long size, 152static inline void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index dcff9e9b52e3..217ad7dc7076 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -169,6 +169,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
169 169
170 return child; 170 return child;
171} 171}
172EXPORT_SYMBOL(drm_mm_get_block);
172 173
173/* 174/*
174 * Put a block. Merge with the previous and / or next block if they are free. 175 * Put a block. Merge with the previous and / or next block if they are free.
@@ -217,6 +218,7 @@ void drm_mm_put_block(struct drm_mm_node * cur)
217 drm_free(cur, sizeof(*cur), DRM_MEM_MM); 218 drm_free(cur, sizeof(*cur), DRM_MEM_MM);
218 } 219 }
219} 220}
221EXPORT_SYMBOL(drm_mm_put_block);
220 222
221struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, 223struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
222 unsigned long size, 224 unsigned long size,
@@ -265,6 +267,7 @@ int drm_mm_clean(struct drm_mm * mm)
265 267
266 return (head->next->next == head); 268 return (head->next->next == head);
267} 269}
270EXPORT_SYMBOL(drm_mm_search_free);
268 271
269int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 272int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
270{ 273{
@@ -273,7 +276,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
273 276
274 return drm_mm_create_tail_node(mm, start, size); 277 return drm_mm_create_tail_node(mm, start, size);
275} 278}
276 279EXPORT_SYMBOL(drm_mm_init);
277 280
278void drm_mm_takedown(struct drm_mm * mm) 281void drm_mm_takedown(struct drm_mm * mm)
279{ 282{
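The drm_mm range allocator is exported here so that GEM (and the i915 GEM code below) can manage the GTT aperture. A sketch of the usual call sequence based on the signatures visible in this diff; the alignment and best_match arguments of drm_mm_search_free() are assumptions drawn from the existing drm_mm API rather than shown in the hunk:

/* Illustrative only: carve a block of `size` bytes out of a managed range. */
static struct drm_mm_node *example_gtt_alloc(struct drm_mm *mm,
                                             unsigned long size,
                                             unsigned alignment)
{
        struct drm_mm_node *free_space;

        /* drm_mm_init(mm, start, size) must have been called once, as
         * i915_gem_init_ioctl() does for the GTT aperture. */
        free_space = drm_mm_search_free(mm, size, alignment, 0);
        if (free_space == NULL)
                return NULL;

        /* Split the requested block out of the free hole; release it
         * later with drm_mm_put_block(). */
        return drm_mm_get_block(free_space, size, alignment);
}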
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 93b1e0475c93..d490db4c0de0 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -49,6 +49,10 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
49 int request, int *eof, void *data); 49 int request, int *eof, void *data);
50static int drm_bufs_info(char *buf, char **start, off_t offset, 50static int drm_bufs_info(char *buf, char **start, off_t offset,
51 int request, int *eof, void *data); 51 int request, int *eof, void *data);
52static int drm_gem_name_info(char *buf, char **start, off_t offset,
53 int request, int *eof, void *data);
54static int drm_gem_object_info(char *buf, char **start, off_t offset,
55 int request, int *eof, void *data);
52#if DRM_DEBUG_CODE 56#if DRM_DEBUG_CODE
53static int drm_vma_info(char *buf, char **start, off_t offset, 57static int drm_vma_info(char *buf, char **start, off_t offset,
54 int request, int *eof, void *data); 58 int request, int *eof, void *data);
@@ -60,13 +64,16 @@ static int drm_vma_info(char *buf, char **start, off_t offset,
60static struct drm_proc_list { 64static struct drm_proc_list {
61 const char *name; /**< file name */ 65 const char *name; /**< file name */
62 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ 66 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
67 u32 driver_features; /**< Required driver features for this entry */
63} drm_proc_list[] = { 68} drm_proc_list[] = {
64 {"name", drm_name_info}, 69 {"name", drm_name_info, 0},
65 {"mem", drm_mem_info}, 70 {"mem", drm_mem_info, 0},
66 {"vm", drm_vm_info}, 71 {"vm", drm_vm_info, 0},
67 {"clients", drm_clients_info}, 72 {"clients", drm_clients_info, 0},
68 {"queues", drm_queues_info}, 73 {"queues", drm_queues_info, 0},
69 {"bufs", drm_bufs_info}, 74 {"bufs", drm_bufs_info, 0},
75 {"gem_names", drm_gem_name_info, DRIVER_GEM},
76 {"gem_objects", drm_gem_object_info, DRIVER_GEM},
70#if DRM_DEBUG_CODE 77#if DRM_DEBUG_CODE
71 {"vma", drm_vma_info}, 78 {"vma", drm_vma_info},
72#endif 79#endif
@@ -90,8 +97,9 @@ static struct drm_proc_list {
90int drm_proc_init(struct drm_minor *minor, int minor_id, 97int drm_proc_init(struct drm_minor *minor, int minor_id,
91 struct proc_dir_entry *root) 98 struct proc_dir_entry *root)
92{ 99{
100 struct drm_device *dev = minor->dev;
93 struct proc_dir_entry *ent; 101 struct proc_dir_entry *ent;
94 int i, j; 102 int i, j, ret;
95 char name[64]; 103 char name[64];
96 104
97 sprintf(name, "%d", minor_id); 105 sprintf(name, "%d", minor_id);
@@ -102,23 +110,42 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
102 } 110 }
103 111
104 for (i = 0; i < DRM_PROC_ENTRIES; i++) { 112 for (i = 0; i < DRM_PROC_ENTRIES; i++) {
113 u32 features = drm_proc_list[i].driver_features;
114
115 if (features != 0 &&
116 (dev->driver->driver_features & features) != features)
117 continue;
118
105 ent = create_proc_entry(drm_proc_list[i].name, 119 ent = create_proc_entry(drm_proc_list[i].name,
106 S_IFREG | S_IRUGO, minor->dev_root); 120 S_IFREG | S_IRUGO, minor->dev_root);
107 if (!ent) { 121 if (!ent) {
108 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 122 DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
109 name, drm_proc_list[i].name); 123 name, drm_proc_list[i].name);
110 for (j = 0; j < i; j++) 124 ret = -1;
111 remove_proc_entry(drm_proc_list[i].name, 125 goto fail;
112 minor->dev_root);
113 remove_proc_entry(name, root);
114 minor->dev_root = NULL;
115 return -1;
116 } 126 }
117 ent->read_proc = drm_proc_list[i].f; 127 ent->read_proc = drm_proc_list[i].f;
118 ent->data = minor; 128 ent->data = minor;
119 } 129 }
120 130
131 if (dev->driver->proc_init) {
132 ret = dev->driver->proc_init(minor);
133 if (ret) {
134 DRM_ERROR("DRM: Driver failed to initialize "
135 "/proc/dri.\n");
136 goto fail;
137 }
138 }
139
121 return 0; 140 return 0;
141 fail:
142
143 for (j = 0; j < i; j++)
144 remove_proc_entry(drm_proc_list[i].name,
145 minor->dev_root);
146 remove_proc_entry(name, root);
147 minor->dev_root = NULL;
148 return ret;
122} 149}
123 150
124/** 151/**
@@ -133,12 +160,16 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
133 */ 160 */
134int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) 161int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
135{ 162{
163 struct drm_device *dev = minor->dev;
136 int i; 164 int i;
137 char name[64]; 165 char name[64];
138 166
139 if (!root || !minor->dev_root) 167 if (!root || !minor->dev_root)
140 return 0; 168 return 0;
141 169
170 if (dev->driver->proc_cleanup)
171 dev->driver->proc_cleanup(minor);
172
142 for (i = 0; i < DRM_PROC_ENTRIES; i++) 173 for (i = 0; i < DRM_PROC_ENTRIES; i++)
143 remove_proc_entry(drm_proc_list[i].name, minor->dev_root); 174 remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
144 sprintf(name, "%d", minor->index); 175 sprintf(name, "%d", minor->index);
@@ -480,6 +511,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
480 return ret; 511 return ret;
481} 512}
482 513
514struct drm_gem_name_info_data {
515 int len;
516 char *buf;
517 int eof;
518};
519
520static int drm_gem_one_name_info(int id, void *ptr, void *data)
521{
522 struct drm_gem_object *obj = ptr;
523 struct drm_gem_name_info_data *nid = data;
524
525 DRM_INFO("name %d size %d\n", obj->name, obj->size);
526 if (nid->eof)
527 return 0;
528
529 nid->len += sprintf(&nid->buf[nid->len],
530 "%6d%9d%8d%9d\n",
531 obj->name, obj->size,
532 atomic_read(&obj->handlecount.refcount),
533 atomic_read(&obj->refcount.refcount));
534 if (nid->len > DRM_PROC_LIMIT) {
535 nid->eof = 1;
536 return 0;
537 }
538 return 0;
539}
540
541static int drm_gem_name_info(char *buf, char **start, off_t offset,
542 int request, int *eof, void *data)
543{
544 struct drm_minor *minor = (struct drm_minor *) data;
545 struct drm_device *dev = minor->dev;
546 struct drm_gem_name_info_data nid;
547
548 if (offset > DRM_PROC_LIMIT) {
549 *eof = 1;
550 return 0;
551 }
552
553 nid.len = sprintf(buf, " name size handles refcount\n");
554 nid.buf = buf;
555 nid.eof = 0;
556 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
557
558 *start = &buf[offset];
559 *eof = 0;
560 if (nid.len > request + offset)
561 return request;
562 *eof = 1;
563 return nid.len - offset;
564}
565
566static int drm_gem_object_info(char *buf, char **start, off_t offset,
567 int request, int *eof, void *data)
568{
569 struct drm_minor *minor = (struct drm_minor *) data;
570 struct drm_device *dev = minor->dev;
571 int len = 0;
572
573 if (offset > DRM_PROC_LIMIT) {
574 *eof = 1;
575 return 0;
576 }
577
578 *start = &buf[offset];
579 *eof = 0;
580 DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
581 DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
582 DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
583 DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
584 DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
585 DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
586 if (len > request + offset)
587 return request;
588 *eof = 1;
589 return len - offset;
590}
591
483#if DRM_DEBUG_CODE 592#if DRM_DEBUG_CODE
484 593
485static int drm__vma_info(char *buf, char **start, off_t offset, int request, 594static int drm__vma_info(char *buf, char **start, off_t offset, int request,
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c2f584f3b46c..82f4657b8879 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -152,6 +152,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
152 goto error_out_unreg; 152 goto error_out_unreg;
153 } 153 }
154 154
155 if (driver->driver_features & DRIVER_GEM) {
156 retcode = drm_gem_init(dev);
157 if (retcode) {
158 DRM_ERROR("Cannot initialize graphics execution "
159 "manager (GEM)\n");
160 goto error_out_unreg;
161 }
162 }
163
155 return 0; 164 return 0;
156 165
157 error_out_unreg: 166 error_out_unreg:
@@ -317,6 +326,7 @@ int drm_put_dev(struct drm_device * dev)
317int drm_put_minor(struct drm_minor **minor_p) 326int drm_put_minor(struct drm_minor **minor_p)
318{ 327{
319 struct drm_minor *minor = *minor_p; 328 struct drm_minor *minor = *minor_p;
329
320 DRM_DEBUG("release secondary minor %d\n", minor->index); 330 DRM_DEBUG("release secondary minor %d\n", minor->index);
321 331
322 if (minor->type == DRM_MINOR_LEGACY) 332 if (minor->type == DRM_MINOR_LEGACY)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index c4bbda662e26..5ba78e4fd2b5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -4,7 +4,11 @@
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \ 6i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
7 i915_suspend.o 7 i915_suspend.o \
8 i915_gem.o \
9 i915_gem_debug.o \
10 i915_gem_proc.o \
11 i915_gem_tiling.o
8 12
9i915-$(CONFIG_COMPAT) += i915_ioc32.o 13i915-$(CONFIG_COMPAT) += i915_ioc32.o
10 14
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 63c6803d471b..f167ff68a4bf 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -170,24 +170,31 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
170 dev_priv->sarea_priv = (drm_i915_sarea_t *) 170 dev_priv->sarea_priv = (drm_i915_sarea_t *)
171 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); 171 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
172 172
173 dev_priv->ring.Start = init->ring_start; 173 if (init->ring_size != 0) {
174 dev_priv->ring.End = init->ring_end; 174 if (dev_priv->ring.ring_obj != NULL) {
175 dev_priv->ring.Size = init->ring_size; 175 i915_dma_cleanup(dev);
176 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 176 DRM_ERROR("Client tried to initialize ringbuffer in "
177 "GEM mode\n");
178 return -EINVAL;
179 }
177 180
178 dev_priv->ring.map.offset = init->ring_start; 181 dev_priv->ring.Size = init->ring_size;
179 dev_priv->ring.map.size = init->ring_size; 182 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
180 dev_priv->ring.map.type = 0;
181 dev_priv->ring.map.flags = 0;
182 dev_priv->ring.map.mtrr = 0;
183 183
184 drm_core_ioremap(&dev_priv->ring.map, dev); 184 dev_priv->ring.map.offset = init->ring_start;
185 dev_priv->ring.map.size = init->ring_size;
186 dev_priv->ring.map.type = 0;
187 dev_priv->ring.map.flags = 0;
188 dev_priv->ring.map.mtrr = 0;
185 189
186 if (dev_priv->ring.map.handle == NULL) { 190 drm_core_ioremap(&dev_priv->ring.map, dev);
187 i915_dma_cleanup(dev); 191
188 DRM_ERROR("can not ioremap virtual address for" 192 if (dev_priv->ring.map.handle == NULL) {
189 " ring buffer\n"); 193 i915_dma_cleanup(dev);
190 return -ENOMEM; 194 DRM_ERROR("can not ioremap virtual address for"
195 " ring buffer\n");
196 return -ENOMEM;
197 }
191 } 198 }
192 199
193 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 200 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -377,9 +384,10 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
377 return 0; 384 return 0;
378} 385}
379 386
380static int i915_emit_box(struct drm_device * dev, 387int
381 struct drm_clip_rect __user * boxes, 388i915_emit_box(struct drm_device *dev,
382 int i, int DR1, int DR4) 389 struct drm_clip_rect __user *boxes,
390 int i, int DR1, int DR4)
383{ 391{
384 drm_i915_private_t *dev_priv = dev->dev_private; 392 drm_i915_private_t *dev_priv = dev->dev_private;
385 struct drm_clip_rect box; 393 struct drm_clip_rect box;
@@ -681,6 +689,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
681 case I915_PARAM_LAST_DISPATCH: 689 case I915_PARAM_LAST_DISPATCH:
682 value = READ_BREADCRUMB(dev_priv); 690 value = READ_BREADCRUMB(dev_priv);
683 break; 691 break;
692 case I915_PARAM_HAS_GEM:
693 value = 1;
694 break;
684 default: 695 default:
685 DRM_ERROR("Unknown parameter %d\n", param->param); 696 DRM_ERROR("Unknown parameter %d\n", param->param);
686 return -EINVAL; 697 return -EINVAL;
@@ -784,6 +795,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
784 memset(dev_priv, 0, sizeof(drm_i915_private_t)); 795 memset(dev_priv, 0, sizeof(drm_i915_private_t));
785 796
786 dev->dev_private = (void *)dev_priv; 797 dev->dev_private = (void *)dev_priv;
798 dev_priv->dev = dev;
787 799
788 /* Add register map (needed for suspend/resume) */ 800 /* Add register map (needed for suspend/resume) */
789 base = drm_get_resource_start(dev, mmio_bar); 801 base = drm_get_resource_start(dev, mmio_bar);
@@ -793,6 +805,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
793 _DRM_KERNEL | _DRM_DRIVER, 805 _DRM_KERNEL | _DRM_DRIVER,
794 &dev_priv->mmio_map); 806 &dev_priv->mmio_map);
795 807
808 i915_gem_load(dev);
809
796 /* Init HWS */ 810 /* Init HWS */
797 if (!I915_NEED_GFX_HWS(dev)) { 811 if (!I915_NEED_GFX_HWS(dev)) {
798 ret = i915_init_phys_hws(dev); 812 ret = i915_init_phys_hws(dev);
@@ -838,6 +852,25 @@ int i915_driver_unload(struct drm_device *dev)
838 return 0; 852 return 0;
839} 853}
840 854
855int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
856{
857 struct drm_i915_file_private *i915_file_priv;
858
859 DRM_DEBUG("\n");
860 i915_file_priv = (struct drm_i915_file_private *)
861 drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
862
863 if (!i915_file_priv)
864 return -ENOMEM;
865
866 file_priv->driver_priv = i915_file_priv;
867
868 i915_file_priv->mm.last_gem_seqno = 0;
869 i915_file_priv->mm.last_gem_throttle_seqno = 0;
870
871 return 0;
872}
873
841void i915_driver_lastclose(struct drm_device * dev) 874void i915_driver_lastclose(struct drm_device * dev)
842{ 875{
843 drm_i915_private_t *dev_priv = dev->dev_private; 876 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -845,6 +878,8 @@ void i915_driver_lastclose(struct drm_device * dev)
845 if (!dev_priv) 878 if (!dev_priv)
846 return; 879 return;
847 880
881 i915_gem_lastclose(dev);
882
848 if (dev_priv->agp_heap) 883 if (dev_priv->agp_heap)
849 i915_mem_takedown(&(dev_priv->agp_heap)); 884 i915_mem_takedown(&(dev_priv->agp_heap));
850 885
@@ -857,6 +892,13 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
857 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 892 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
858} 893}
859 894
895void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
896{
897 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
898
899 drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
900}
901
860struct drm_ioctl_desc i915_ioctls[] = { 902struct drm_ioctl_desc i915_ioctls[] = {
861 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 903 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
862 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 904 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
@@ -875,6 +917,22 @@ struct drm_ioctl_desc i915_ioctls[] = {
875 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), 917 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
876 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 918 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
877 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), 919 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
920 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
921 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
922 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
923 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
924 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
925 DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
926 DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
927 DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
928 DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
929 DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
930 DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
931 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
932 DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
933 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
934 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
935 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
878}; 936};
879 937
880int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 938int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
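The new I915_PARAM_HAS_GEM getparam value lets userspace detect GEM support at runtime before choosing a buffer-management path. A minimal userspace probe, assuming an open i915 DRM fd; drm_i915_getparam_t and DRM_IOCTL_I915_GETPARAM come from the i915_drm.h header also touched by this patch:

#include <sys/ioctl.h>
#include "i915_drm.h"

/* Illustrative only: returns non-zero if the running kernel exposes GEM. */
static int i915_has_gem(int fd)
{
        drm_i915_getparam_t gp;
        int value = 0;

        gp.param = I915_PARAM_HAS_GEM;
        gp.value = &value;
        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                return 0;       /* older kernels reject the unknown param */

        return value;
}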
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 37af03f4db36..a80ead215282 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -85,12 +85,15 @@ static struct drm_driver driver = {
85 /* don't use mtrr's here, the Xserver or user space app should 85 /* don't use mtrr's here, the Xserver or user space app should
86 * deal with them for intel hardware. 86 * deal with them for intel hardware.
87 */ 87 */
88 .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | 88 .driver_features =
89 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 89 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
90 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
90 .load = i915_driver_load, 91 .load = i915_driver_load,
91 .unload = i915_driver_unload, 92 .unload = i915_driver_unload,
93 .open = i915_driver_open,
92 .lastclose = i915_driver_lastclose, 94 .lastclose = i915_driver_lastclose,
93 .preclose = i915_driver_preclose, 95 .preclose = i915_driver_preclose,
96 .postclose = i915_driver_postclose,
94 .suspend = i915_suspend, 97 .suspend = i915_suspend,
95 .resume = i915_resume, 98 .resume = i915_resume,
96 .device_is_agp = i915_driver_device_is_agp, 99 .device_is_agp = i915_driver_device_is_agp,
@@ -104,6 +107,10 @@ static struct drm_driver driver = {
104 .reclaim_buffers = drm_core_reclaim_buffers, 107 .reclaim_buffers = drm_core_reclaim_buffers,
105 .get_map_ofs = drm_core_get_map_ofs, 108 .get_map_ofs = drm_core_get_map_ofs,
106 .get_reg_ofs = drm_core_get_reg_ofs, 109 .get_reg_ofs = drm_core_get_reg_ofs,
110 .proc_init = i915_gem_proc_init,
111 .proc_cleanup = i915_gem_proc_cleanup,
112 .gem_init_object = i915_gem_init_object,
113 .gem_free_object = i915_gem_free_object,
107 .ioctls = i915_ioctls, 114 .ioctls = i915_ioctls,
108 .fops = { 115 .fops = {
109 .owner = THIS_MODULE, 116 .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d1a02bead458..87b071ab8647 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -39,7 +39,7 @@
39 39
40#define DRIVER_NAME "i915" 40#define DRIVER_NAME "i915"
41#define DRIVER_DESC "Intel Graphics" 41#define DRIVER_DESC "Intel Graphics"
42#define DRIVER_DATE "20060119" 42#define DRIVER_DATE "20080730"
43 43
44enum pipe { 44enum pipe {
45 PIPE_A = 0, 45 PIPE_A = 0,
@@ -60,16 +60,23 @@ enum pipe {
60#define DRIVER_MINOR 6 60#define DRIVER_MINOR 6
61#define DRIVER_PATCHLEVEL 0 61#define DRIVER_PATCHLEVEL 0
62 62
63#define WATCH_COHERENCY 0
64#define WATCH_BUF 0
65#define WATCH_EXEC 0
66#define WATCH_LRU 0
67#define WATCH_RELOC 0
68#define WATCH_INACTIVE 0
69#define WATCH_PWRITE 0
70
63typedef struct _drm_i915_ring_buffer { 71typedef struct _drm_i915_ring_buffer {
64 int tail_mask; 72 int tail_mask;
65 unsigned long Start;
66 unsigned long End;
67 unsigned long Size; 73 unsigned long Size;
68 u8 *virtual_start; 74 u8 *virtual_start;
69 int head; 75 int head;
70 int tail; 76 int tail;
71 int space; 77 int space;
72 drm_local_map_t map; 78 drm_local_map_t map;
79 struct drm_gem_object *ring_obj;
73} drm_i915_ring_buffer_t; 80} drm_i915_ring_buffer_t;
74 81
75struct mem_block { 82struct mem_block {
@@ -101,6 +108,8 @@ struct intel_opregion {
101}; 108};
102 109
103typedef struct drm_i915_private { 110typedef struct drm_i915_private {
111 struct drm_device *dev;
112
104 drm_local_map_t *sarea; 113 drm_local_map_t *sarea;
105 drm_local_map_t *mmio_map; 114 drm_local_map_t *mmio_map;
106 115
@@ -113,6 +122,7 @@ typedef struct drm_i915_private {
113 uint32_t counter; 122 uint32_t counter;
114 unsigned int status_gfx_addr; 123 unsigned int status_gfx_addr;
115 drm_local_map_t hws_map; 124 drm_local_map_t hws_map;
125 struct drm_gem_object *hws_obj;
116 126
117 unsigned int cpp; 127 unsigned int cpp;
118 int back_offset; 128 int back_offset;
@@ -122,7 +132,6 @@ typedef struct drm_i915_private {
122 132
123 wait_queue_head_t irq_queue; 133 wait_queue_head_t irq_queue;
124 atomic_t irq_received; 134 atomic_t irq_received;
125 atomic_t irq_emitted;
126 /** Protects user_irq_refcount and irq_mask_reg */ 135 /** Protects user_irq_refcount and irq_mask_reg */
127 spinlock_t user_irq_lock; 136 spinlock_t user_irq_lock;
128 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */ 137 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
@@ -230,8 +239,174 @@ typedef struct drm_i915_private {
230 u8 saveDACMASK; 239 u8 saveDACMASK;
231 u8 saveDACDATA[256*3]; /* 256 3-byte colors */ 240 u8 saveDACDATA[256*3]; /* 256 3-byte colors */
232 u8 saveCR[37]; 241 u8 saveCR[37];
242
243 struct {
244 struct drm_mm gtt_space;
245
246 /**
247 * List of objects currently involved in rendering from the
248 * ringbuffer.
249 *
250 * A reference is held on the buffer while on this list.
251 */
252 struct list_head active_list;
253
254 /**
255 * List of objects which are not in the ringbuffer but which
256 * still have a write_domain which needs to be flushed before
257 * unbinding.
258 *
259 * A reference is held on the buffer while on this list.
260 */
261 struct list_head flushing_list;
262
263 /**
264 * LRU list of objects which are not in the ringbuffer and
265 * are ready to unbind, but are still in the GTT.
266 *
267 * A reference is not held on the buffer while on this list,
268 * as merely being GTT-bound shouldn't prevent its being
269 * freed, and we'll pull it off the list in the free path.
270 */
271 struct list_head inactive_list;
272
273 /**
274 * List of breadcrumbs associated with GPU requests currently
275 * outstanding.
276 */
277 struct list_head request_list;
278
279 /**
280 * We leave the user IRQ off as much as possible,
281 * but this means that requests will finish and never
282 * be retired once the system goes idle. Set a timer to
283 * fire periodically while the ring is running. When it
284 * fires, go retire requests.
285 */
286 struct delayed_work retire_work;
287
288 uint32_t next_gem_seqno;
289
290 /**
291 * Waiting sequence number, if any
292 */
293 uint32_t waiting_gem_seqno;
294
295 /**
296 * Last seq seen at irq time
297 */
298 uint32_t irq_gem_seqno;
299
300 /**
301 * Flag if the X Server, and thus DRM, is not currently in
302 * control of the device.
303 *
304 * This is set between LeaveVT and EnterVT. It needs to be
305 * replaced with a semaphore. It also needs to be
306 * transitioned away from for kernel modesetting.
307 */
308 int suspended;
309
310 /**
311 * Flag if the hardware appears to be wedged.
312 *
313 * This is set when attempts to idle the device timeout.
 314 * It prevents command submission from occurring and makes
315 * every pending request fail
316 */
317 int wedged;
318
319 /** Bit 6 swizzling required for X tiling */
320 uint32_t bit_6_swizzle_x;
321 /** Bit 6 swizzling required for Y tiling */
322 uint32_t bit_6_swizzle_y;
323 } mm;
233} drm_i915_private_t; 324} drm_i915_private_t;
234 325
326/** driver private structure attached to each drm_gem_object */
327struct drm_i915_gem_object {
328 struct drm_gem_object *obj;
329
330 /** Current space allocated to this object in the GTT, if any. */
331 struct drm_mm_node *gtt_space;
332
333 /** This object's place on the active/flushing/inactive lists */
334 struct list_head list;
335
336 /**
337 * This is set if the object is on the active or flushing lists
338 * (has pending rendering), and is not set if it's on inactive (ready
339 * to be unbound).
340 */
341 int active;
342
343 /**
344 * This is set if the object has been written to since last bound
345 * to the GTT
346 */
347 int dirty;
348
349 /** AGP memory structure for our GTT binding. */
350 DRM_AGP_MEM *agp_mem;
351
352 struct page **page_list;
353
354 /**
355 * Current offset of the object in GTT space.
356 *
357 * This is the same as gtt_space->start
358 */
359 uint32_t gtt_offset;
360
361 /** Boolean whether this object has a valid gtt offset. */
362 int gtt_bound;
363
364 /** How many users have pinned this object in GTT space */
365 int pin_count;
366
367 /** Breadcrumb of last rendering to the buffer. */
368 uint32_t last_rendering_seqno;
369
370 /** Current tiling mode for the object. */
371 uint32_t tiling_mode;
372
373 /**
374 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
375 * GEM_DOMAIN_CPU is not in the object's read domain.
376 */
377 uint8_t *page_cpu_valid;
378};
379
380/**
381 * Request queue structure.
382 *
383 * The request queue allows us to note sequence numbers that have been emitted
384 * and may be associated with active buffers to be retired.
385 *
386 * By keeping this list, we can avoid having to do questionable
387 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
388 * an emission time with seqnos for tracking how far ahead of the GPU we are.
389 */
390struct drm_i915_gem_request {
391 /** GEM sequence number associated with this request. */
392 uint32_t seqno;
393
394 /** Time at which this request was emitted, in jiffies. */
395 unsigned long emitted_jiffies;
396
397 /** Cache domains that were flushed at the start of the request. */
398 uint32_t flush_domains;
399
400 struct list_head list;
401};
402
403struct drm_i915_file_private {
404 struct {
405 uint32_t last_gem_seqno;
406 uint32_t last_gem_throttle_seqno;
407 } mm;
408};
409
235extern struct drm_ioctl_desc i915_ioctls[]; 410extern struct drm_ioctl_desc i915_ioctls[];
236extern int i915_max_ioctl; 411extern int i915_max_ioctl;
237 412
@@ -239,18 +414,26 @@ extern int i915_max_ioctl;
239extern void i915_kernel_lost_context(struct drm_device * dev); 414extern void i915_kernel_lost_context(struct drm_device * dev);
240extern int i915_driver_load(struct drm_device *, unsigned long flags); 415extern int i915_driver_load(struct drm_device *, unsigned long flags);
241extern int i915_driver_unload(struct drm_device *); 416extern int i915_driver_unload(struct drm_device *);
417extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
242extern void i915_driver_lastclose(struct drm_device * dev); 418extern void i915_driver_lastclose(struct drm_device * dev);
243extern void i915_driver_preclose(struct drm_device *dev, 419extern void i915_driver_preclose(struct drm_device *dev,
244 struct drm_file *file_priv); 420 struct drm_file *file_priv);
421extern void i915_driver_postclose(struct drm_device *dev,
422 struct drm_file *file_priv);
245extern int i915_driver_device_is_agp(struct drm_device * dev); 423extern int i915_driver_device_is_agp(struct drm_device * dev);
246extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 424extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
247 unsigned long arg); 425 unsigned long arg);
426extern int i915_emit_box(struct drm_device *dev,
427 struct drm_clip_rect __user *boxes,
428 int i, int DR1, int DR4);
248 429
249/* i915_irq.c */ 430/* i915_irq.c */
250extern int i915_irq_emit(struct drm_device *dev, void *data, 431extern int i915_irq_emit(struct drm_device *dev, void *data,
251 struct drm_file *file_priv); 432 struct drm_file *file_priv);
252extern int i915_irq_wait(struct drm_device *dev, void *data, 433extern int i915_irq_wait(struct drm_device *dev, void *data,
253 struct drm_file *file_priv); 434 struct drm_file *file_priv);
435void i915_user_irq_get(struct drm_device *dev);
436void i915_user_irq_put(struct drm_device *dev);
254 437
255extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 438extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
256extern void i915_driver_irq_preinstall(struct drm_device * dev); 439extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -279,6 +462,67 @@ extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
279extern void i915_mem_takedown(struct mem_block **heap); 462extern void i915_mem_takedown(struct mem_block **heap);
280extern void i915_mem_release(struct drm_device * dev, 463extern void i915_mem_release(struct drm_device * dev,
281 struct drm_file *file_priv, struct mem_block *heap); 464 struct drm_file *file_priv, struct mem_block *heap);
465/* i915_gem.c */
466int i915_gem_init_ioctl(struct drm_device *dev, void *data,
467 struct drm_file *file_priv);
468int i915_gem_create_ioctl(struct drm_device *dev, void *data,
469 struct drm_file *file_priv);
470int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
471 struct drm_file *file_priv);
472int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
473 struct drm_file *file_priv);
474int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
475 struct drm_file *file_priv);
476int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
477 struct drm_file *file_priv);
478int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
479 struct drm_file *file_priv);
480int i915_gem_execbuffer(struct drm_device *dev, void *data,
481 struct drm_file *file_priv);
482int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
483 struct drm_file *file_priv);
484int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
485 struct drm_file *file_priv);
486int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
487 struct drm_file *file_priv);
488int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
489 struct drm_file *file_priv);
490int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
491 struct drm_file *file_priv);
492int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
493 struct drm_file *file_priv);
494int i915_gem_set_tiling(struct drm_device *dev, void *data,
495 struct drm_file *file_priv);
496int i915_gem_get_tiling(struct drm_device *dev, void *data,
497 struct drm_file *file_priv);
498void i915_gem_load(struct drm_device *dev);
499int i915_gem_proc_init(struct drm_minor *minor);
500void i915_gem_proc_cleanup(struct drm_minor *minor);
501int i915_gem_init_object(struct drm_gem_object *obj);
502void i915_gem_free_object(struct drm_gem_object *obj);
503int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
504void i915_gem_object_unpin(struct drm_gem_object *obj);
505void i915_gem_lastclose(struct drm_device *dev);
506uint32_t i915_get_gem_seqno(struct drm_device *dev);
507void i915_gem_retire_requests(struct drm_device *dev);
508void i915_gem_retire_work_handler(struct work_struct *work);
509void i915_gem_clflush_object(struct drm_gem_object *obj);
510
511/* i915_gem_tiling.c */
512void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
513
514/* i915_gem_debug.c */
515void i915_gem_dump_object(struct drm_gem_object *obj, int len,
516 const char *where, uint32_t mark);
517#if WATCH_INACTIVE
518void i915_verify_inactive(struct drm_device *dev, char *file, int line);
519#else
520#define i915_verify_inactive(dev, file, line)
521#endif
522void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
523void i915_gem_dump_object(struct drm_gem_object *obj, int len,
524 const char *where, uint32_t mark);
525void i915_dump_lru(struct drm_device *dev, const char *where);
282 526
283/* i915_suspend.c */ 527/* i915_suspend.c */
284extern int i915_save_state(struct drm_device *dev); 528extern int i915_save_state(struct drm_device *dev);
@@ -347,6 +591,7 @@ extern void opregion_enable_asle(struct drm_device *dev);
347 */ 591 */
348#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 592#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
349#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5) 593#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
594#define I915_GEM_HWS_INDEX 0x10
350 595
351extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 596extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
352 597
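The mm.* comments above describe the object lifecycle GEM tracks for i915: active while the ring still references a buffer, flushing while only a pending write-domain flush keeps it busy, inactive once it may be unbound. The following is a purely illustrative sketch of the retire step that moves buffers off the active list once their seqno has passed; the real logic lives in i915_gem_retire_requests() in i915_gem.c and handles requests, flushing and seqno wraparound in more detail:

/* Illustrative only, not the i915_gem.c implementation. */
static void example_retire_active(struct drm_device *dev, uint32_t completed_seqno)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        while (!list_empty(&dev_priv->mm.active_list)) {
                struct drm_i915_gem_object *obj_priv =
                        list_first_entry(&dev_priv->mm.active_list,
                                         struct drm_i915_gem_object, list);

                /* Stop at the first buffer the GPU is still using. */
                if (obj_priv->last_rendering_seqno > completed_seqno)
                        break;

                /* Ready to unbind: move it to the inactive LRU. */
                obj_priv->active = 0;
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
        }
}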
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
new file mode 100644
index 000000000000..90ae8a0369f7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -0,0 +1,2497 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32#include <linux/swap.h>
33
34static int
35i915_gem_object_set_domain(struct drm_gem_object *obj,
36 uint32_t read_domains,
37 uint32_t write_domain);
38static int
39i915_gem_object_set_domain_range(struct drm_gem_object *obj,
40 uint64_t offset,
41 uint64_t size,
42 uint32_t read_domains,
43 uint32_t write_domain);
44static int
45i915_gem_set_domain(struct drm_gem_object *obj,
46 struct drm_file *file_priv,
47 uint32_t read_domains,
48 uint32_t write_domain);
49static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
50static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
51static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
52
53int
54i915_gem_init_ioctl(struct drm_device *dev, void *data,
55 struct drm_file *file_priv)
56{
57 drm_i915_private_t *dev_priv = dev->dev_private;
58 struct drm_i915_gem_init *args = data;
59
60 mutex_lock(&dev->struct_mutex);
61
62 if (args->gtt_start >= args->gtt_end ||
63 (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
64 (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
65 mutex_unlock(&dev->struct_mutex);
66 return -EINVAL;
67 }
68
69 drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
70 args->gtt_end - args->gtt_start);
71
72 dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
73
74 mutex_unlock(&dev->struct_mutex);
75
76 return 0;
77}
78
79
80/**
81 * Creates a new mm object and returns a handle to it.
82 */
83int
84i915_gem_create_ioctl(struct drm_device *dev, void *data,
85 struct drm_file *file_priv)
86{
87 struct drm_i915_gem_create *args = data;
88 struct drm_gem_object *obj;
89 int handle, ret;
90
91 args->size = roundup(args->size, PAGE_SIZE);
92
93 /* Allocate the new object */
94 obj = drm_gem_object_alloc(dev, args->size);
95 if (obj == NULL)
96 return -ENOMEM;
97
98 ret = drm_gem_handle_create(file_priv, obj, &handle);
99 mutex_lock(&dev->struct_mutex);
100 drm_gem_object_handle_unreference(obj);
101 mutex_unlock(&dev->struct_mutex);
102
103 if (ret)
104 return ret;
105
106 args->handle = handle;
107
108 return 0;
109}
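
For context, a minimal user-space sketch of exercising this ioctl follows. It is illustrative only, not part of the patch, and assumes the struct drm_i915_gem_create layout (size/handle/pad) and the DRM_IOCTL_I915_GEM_CREATE define from the i915_drm.h header added elsewhere in this series.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Create a GEM object of at least 'size' bytes and return its handle,
 * or -1 on failure (errno holds the kernel's error code). */
int create_gem_object(int drm_fd, uint64_t size, uint32_t *handle_out)
{
	struct drm_i915_gem_create create;

	memset(&create, 0, sizeof(create));
	create.size = size;	/* the kernel rounds this up to PAGE_SIZE */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
		return -1;

	*handle_out = create.handle;	/* per-file handle for later ioctls */
	return 0;
}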
110
111/**
112 * Reads data from the object referenced by handle.
113 *
114 * On error, the contents of *data are undefined.
115 */
116int
117i915_gem_pread_ioctl(struct drm_device *dev, void *data,
118 struct drm_file *file_priv)
119{
120 struct drm_i915_gem_pread *args = data;
121 struct drm_gem_object *obj;
122 struct drm_i915_gem_object *obj_priv;
123 ssize_t read;
124 loff_t offset;
125 int ret;
126
127 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
128 if (obj == NULL)
129 return -EBADF;
130 obj_priv = obj->driver_private;
131
132 /* Bounds check source.
133 *
134 * XXX: This could use review for overflow issues...
135 */
136 if (args->offset > obj->size || args->size > obj->size ||
137 args->offset + args->size > obj->size) {
138 drm_gem_object_unreference(obj);
139 return -EINVAL;
140 }
141
142 mutex_lock(&dev->struct_mutex);
143
144 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
145 I915_GEM_DOMAIN_CPU, 0);
 146	if (ret != 0) {
 147		drm_gem_object_unreference(obj);
 148		mutex_unlock(&dev->struct_mutex);
		return ret;
 149	}
150
151 offset = args->offset;
152
153 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
154 args->size, &offset);
155 if (read != args->size) {
156 drm_gem_object_unreference(obj);
157 mutex_unlock(&dev->struct_mutex);
158 if (read < 0)
159 return read;
160 else
161 return -EINVAL;
162 }
163
164 drm_gem_object_unreference(obj);
165 mutex_unlock(&dev->struct_mutex);
166
167 return 0;
168}
169
170static int
171i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
172 struct drm_i915_gem_pwrite *args,
173 struct drm_file *file_priv)
174{
175 struct drm_i915_gem_object *obj_priv = obj->driver_private;
176 ssize_t remain;
177 loff_t offset;
178 char __user *user_data;
179 char *vaddr;
180 int i, o, l;
181 int ret = 0;
182 unsigned long pfn;
183 unsigned long unwritten;
184
185 user_data = (char __user *) (uintptr_t) args->data_ptr;
186 remain = args->size;
187 if (!access_ok(VERIFY_READ, user_data, remain))
188 return -EFAULT;
189
190
191 mutex_lock(&dev->struct_mutex);
192 ret = i915_gem_object_pin(obj, 0);
193 if (ret) {
194 mutex_unlock(&dev->struct_mutex);
195 return ret;
196 }
197 ret = i915_gem_set_domain(obj, file_priv,
198 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
199 if (ret)
200 goto fail;
201
202 obj_priv = obj->driver_private;
203 offset = obj_priv->gtt_offset + args->offset;
204 obj_priv->dirty = 1;
205
206 while (remain > 0) {
207 /* Operation in this page
208 *
209 * i = page number
210 * o = offset within page
211 * l = bytes to copy
212 */
213 i = offset >> PAGE_SHIFT;
214 o = offset & (PAGE_SIZE-1);
215 l = remain;
216 if ((o + l) > PAGE_SIZE)
217 l = PAGE_SIZE - o;
218
219 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
220
221#ifdef CONFIG_HIGHMEM
222 /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
223 */
224 vaddr = kmap_atomic_pfn(pfn, KM_USER0);
225#if WATCH_PWRITE
226 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
227 i, o, l, pfn, vaddr);
228#endif
229 unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
230 user_data, l);
231 kunmap_atomic(vaddr, KM_USER0);
232
233 if (unwritten)
234#endif /* CONFIG_HIGHMEM */
235 {
236 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
237#if WATCH_PWRITE
238 DRM_INFO("pwrite slow i %d o %d l %d "
239 "pfn %ld vaddr %p\n",
240 i, o, l, pfn, vaddr);
241#endif
242 if (vaddr == NULL) {
243 ret = -EFAULT;
244 goto fail;
245 }
246 unwritten = __copy_from_user(vaddr + o, user_data, l);
247#if WATCH_PWRITE
248 DRM_INFO("unwritten %ld\n", unwritten);
249#endif
250 iounmap(vaddr);
251 if (unwritten) {
252 ret = -EFAULT;
253 goto fail;
254 }
255 }
256
257 remain -= l;
258 user_data += l;
259 offset += l;
260 }
261#if WATCH_PWRITE && 1
262 i915_gem_clflush_object(obj);
263 i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
264 i915_gem_clflush_object(obj);
265#endif
266
267fail:
268 i915_gem_object_unpin(obj);
269 mutex_unlock(&dev->struct_mutex);
270
271 return ret;
272}
273
274int
275i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
276 struct drm_i915_gem_pwrite *args,
277 struct drm_file *file_priv)
278{
279 int ret;
280 loff_t offset;
281 ssize_t written;
282
283 mutex_lock(&dev->struct_mutex);
284
285 ret = i915_gem_set_domain(obj, file_priv,
286 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
287 if (ret) {
288 mutex_unlock(&dev->struct_mutex);
289 return ret;
290 }
291
292 offset = args->offset;
293
294 written = vfs_write(obj->filp,
295 (char __user *)(uintptr_t) args->data_ptr,
296 args->size, &offset);
297 if (written != args->size) {
298 mutex_unlock(&dev->struct_mutex);
299 if (written < 0)
300 return written;
301 else
302 return -EINVAL;
303 }
304
305 mutex_unlock(&dev->struct_mutex);
306
307 return 0;
308}
309
310/**
311 * Writes data to the object referenced by handle.
312 *
313 * On error, the contents of the buffer that were to be modified are undefined.
314 */
315int
316i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
317 struct drm_file *file_priv)
318{
319 struct drm_i915_gem_pwrite *args = data;
320 struct drm_gem_object *obj;
321 struct drm_i915_gem_object *obj_priv;
322 int ret = 0;
323
324 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
325 if (obj == NULL)
326 return -EBADF;
327 obj_priv = obj->driver_private;
328
329 /* Bounds check destination.
330 *
331 * XXX: This could use review for overflow issues...
332 */
333 if (args->offset > obj->size || args->size > obj->size ||
334 args->offset + args->size > obj->size) {
335 drm_gem_object_unreference(obj);
336 return -EINVAL;
337 }
338
339 /* We can only do the GTT pwrite on untiled buffers, as otherwise
340 * it would end up going through the fenced access, and we'll get
341 * different detiling behavior between reading and writing.
342 * pread/pwrite currently are reading and writing from the CPU
343 * perspective, requiring manual detiling by the client.
344 */
345 if (obj_priv->tiling_mode == I915_TILING_NONE &&
346 dev->gtt_total != 0)
347 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
348 else
349 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
350
351#if WATCH_PWRITE
352 if (ret)
353 DRM_INFO("pwrite failed %d\n", ret);
354#endif
355
356 drm_gem_object_unreference(obj);
357
358 return ret;
359}
360
361/**
362 * Called when user space prepares to use an object
363 */
364int
365i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
366 struct drm_file *file_priv)
367{
368 struct drm_i915_gem_set_domain *args = data;
369 struct drm_gem_object *obj;
370 int ret;
371
372 if (!(dev->driver->driver_features & DRIVER_GEM))
373 return -ENODEV;
374
375 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
376 if (obj == NULL)
377 return -EBADF;
378
379 mutex_lock(&dev->struct_mutex);
380#if WATCH_BUF
381 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
382 obj, obj->size, args->read_domains, args->write_domain);
383#endif
384 ret = i915_gem_set_domain(obj, file_priv,
385 args->read_domains, args->write_domain);
386 drm_gem_object_unreference(obj);
387 mutex_unlock(&dev->struct_mutex);
388 return ret;
389}
390
391/**
392 * Called when user space has done writes to this buffer
393 */
394int
395i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
396 struct drm_file *file_priv)
397{
398 struct drm_i915_gem_sw_finish *args = data;
399 struct drm_gem_object *obj;
400 struct drm_i915_gem_object *obj_priv;
401 int ret = 0;
402
403 if (!(dev->driver->driver_features & DRIVER_GEM))
404 return -ENODEV;
405
406 mutex_lock(&dev->struct_mutex);
407 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
408 if (obj == NULL) {
409 mutex_unlock(&dev->struct_mutex);
410 return -EBADF;
411 }
412
413#if WATCH_BUF
414 DRM_INFO("%s: sw_finish %d (%p %d)\n",
415 __func__, args->handle, obj, obj->size);
416#endif
417 obj_priv = obj->driver_private;
418
419 /* Pinned buffers may be scanout, so flush the cache */
420 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
421 i915_gem_clflush_object(obj);
422 drm_agp_chipset_flush(dev);
423 }
424 drm_gem_object_unreference(obj);
425 mutex_unlock(&dev->struct_mutex);
426 return ret;
427}
428
429/**
430 * Maps the contents of an object, returning the address it is mapped
431 * into.
432 *
433 * While the mapping holds a reference on the contents of the object, it doesn't
434 * imply a ref on the object itself.
435 */
436int
437i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
438 struct drm_file *file_priv)
439{
440 struct drm_i915_gem_mmap *args = data;
441 struct drm_gem_object *obj;
442 loff_t offset;
443 unsigned long addr;
444
445 if (!(dev->driver->driver_features & DRIVER_GEM))
446 return -ENODEV;
447
448 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
449 if (obj == NULL)
450 return -EBADF;
451
452 offset = args->offset;
453
454 down_write(&current->mm->mmap_sem);
455 addr = do_mmap(obj->filp, 0, args->size,
456 PROT_READ | PROT_WRITE, MAP_SHARED,
457 args->offset);
458 up_write(&current->mm->mmap_sem);
459 mutex_lock(&dev->struct_mutex);
460 drm_gem_object_unreference(obj);
461 mutex_unlock(&dev->struct_mutex);
462 if (IS_ERR((void *)addr))
463 return addr;
464
465 args->addr_ptr = (uint64_t) addr;
466
467 return 0;
468}
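
A matching user-space sketch of the mmap path, again purely illustrative and assuming the struct drm_i915_gem_mmap layout and DRM_IOCTL_I915_GEM_MMAP define from the new i915_drm.h:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Map the whole object identified by 'handle' and return a CPU pointer,
 * or NULL on failure; the kernel performs the mmap() on our behalf. */
void *map_gem_object(int drm_fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap mmap_arg;

	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = handle;
	mmap_arg.offset = 0;
	mmap_arg.size = size;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) != 0)
		return NULL;

	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}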
469
470static void
471i915_gem_object_free_page_list(struct drm_gem_object *obj)
472{
473 struct drm_i915_gem_object *obj_priv = obj->driver_private;
474 int page_count = obj->size / PAGE_SIZE;
475 int i;
476
477 if (obj_priv->page_list == NULL)
478 return;
479
480
481 for (i = 0; i < page_count; i++)
482 if (obj_priv->page_list[i] != NULL) {
483 if (obj_priv->dirty)
484 set_page_dirty(obj_priv->page_list[i]);
485 mark_page_accessed(obj_priv->page_list[i]);
486 page_cache_release(obj_priv->page_list[i]);
487 }
488 obj_priv->dirty = 0;
489
490 drm_free(obj_priv->page_list,
491 page_count * sizeof(struct page *),
492 DRM_MEM_DRIVER);
493 obj_priv->page_list = NULL;
494}
495
496static void
497i915_gem_object_move_to_active(struct drm_gem_object *obj)
498{
499 struct drm_device *dev = obj->dev;
500 drm_i915_private_t *dev_priv = dev->dev_private;
501 struct drm_i915_gem_object *obj_priv = obj->driver_private;
502
503 /* Add a reference if we're newly entering the active list. */
504 if (!obj_priv->active) {
505 drm_gem_object_reference(obj);
506 obj_priv->active = 1;
507 }
508 /* Move from whatever list we were on to the tail of execution. */
509 list_move_tail(&obj_priv->list,
510 &dev_priv->mm.active_list);
511}
512
513
514static void
515i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
516{
517 struct drm_device *dev = obj->dev;
518 drm_i915_private_t *dev_priv = dev->dev_private;
519 struct drm_i915_gem_object *obj_priv = obj->driver_private;
520
521 i915_verify_inactive(dev, __FILE__, __LINE__);
522 if (obj_priv->pin_count != 0)
523 list_del_init(&obj_priv->list);
524 else
525 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
526
527 if (obj_priv->active) {
528 obj_priv->active = 0;
529 drm_gem_object_unreference(obj);
530 }
531 i915_verify_inactive(dev, __FILE__, __LINE__);
532}
533
534/**
535 * Creates a new sequence number, emitting a write of it to the status page
536 * plus an interrupt, which will trigger i915_user_interrupt_handler.
537 *
538 * Must be called with struct_lock held.
539 *
540 * Returned sequence numbers are nonzero on success.
541 */
542static uint32_t
543i915_add_request(struct drm_device *dev, uint32_t flush_domains)
544{
545 drm_i915_private_t *dev_priv = dev->dev_private;
546 struct drm_i915_gem_request *request;
547 uint32_t seqno;
548 int was_empty;
549 RING_LOCALS;
550
551 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
552 if (request == NULL)
553 return 0;
554
555 /* Grab the seqno we're going to make this request be, and bump the
556 * next (skipping 0 so it can be the reserved no-seqno value).
557 */
558 seqno = dev_priv->mm.next_gem_seqno;
559 dev_priv->mm.next_gem_seqno++;
560 if (dev_priv->mm.next_gem_seqno == 0)
561 dev_priv->mm.next_gem_seqno++;
562
563 BEGIN_LP_RING(4);
564 OUT_RING(MI_STORE_DWORD_INDEX);
565 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
566 OUT_RING(seqno);
567
568 OUT_RING(MI_USER_INTERRUPT);
569 ADVANCE_LP_RING();
570
571 DRM_DEBUG("%d\n", seqno);
572
573 request->seqno = seqno;
574 request->emitted_jiffies = jiffies;
575 request->flush_domains = flush_domains;
576 was_empty = list_empty(&dev_priv->mm.request_list);
577 list_add_tail(&request->list, &dev_priv->mm.request_list);
578
579 if (was_empty)
580 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
581 return seqno;
582}
583
584/**
585 * Command execution barrier
586 *
587 * Ensures that all commands in the ring are finished
588 * before signalling the CPU
589 */
590uint32_t
591i915_retire_commands(struct drm_device *dev)
592{
593 drm_i915_private_t *dev_priv = dev->dev_private;
594 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
595 uint32_t flush_domains = 0;
596 RING_LOCALS;
597
598 /* The sampler always gets flushed on i965 (sigh) */
599 if (IS_I965G(dev))
600 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
601 BEGIN_LP_RING(2);
602 OUT_RING(cmd);
603 OUT_RING(0); /* noop */
604 ADVANCE_LP_RING();
605 return flush_domains;
606}
607
608/**
609 * Moves buffers associated only with the given active seqno from the active
610 * to inactive list, potentially freeing them.
611 */
612static void
613i915_gem_retire_request(struct drm_device *dev,
614 struct drm_i915_gem_request *request)
615{
616 drm_i915_private_t *dev_priv = dev->dev_private;
617
618 /* Move any buffers on the active list that are no longer referenced
619 * by the ringbuffer to the flushing/inactive lists as appropriate.
620 */
621 while (!list_empty(&dev_priv->mm.active_list)) {
622 struct drm_gem_object *obj;
623 struct drm_i915_gem_object *obj_priv;
624
625 obj_priv = list_first_entry(&dev_priv->mm.active_list,
626 struct drm_i915_gem_object,
627 list);
628 obj = obj_priv->obj;
629
630 /* If the seqno being retired doesn't match the oldest in the
631 * list, then the oldest in the list must still be newer than
632 * this seqno.
633 */
634 if (obj_priv->last_rendering_seqno != request->seqno)
635 return;
636#if WATCH_LRU
637 DRM_INFO("%s: retire %d moves to inactive list %p\n",
638 __func__, request->seqno, obj);
639#endif
640
641 if (obj->write_domain != 0) {
642 list_move_tail(&obj_priv->list,
643 &dev_priv->mm.flushing_list);
644 } else {
645 i915_gem_object_move_to_inactive(obj);
646 }
647 }
648
649 if (request->flush_domains != 0) {
650 struct drm_i915_gem_object *obj_priv, *next;
651
652 /* Clear the write domain and activity from any buffers
653 * that are just waiting for a flush matching the one retired.
654 */
655 list_for_each_entry_safe(obj_priv, next,
656 &dev_priv->mm.flushing_list, list) {
657 struct drm_gem_object *obj = obj_priv->obj;
658
659 if (obj->write_domain & request->flush_domains) {
660 obj->write_domain = 0;
661 i915_gem_object_move_to_inactive(obj);
662 }
663 }
664
665 }
666}
667
668/**
669 * Returns true if seq1 is later than seq2.
670 */
671static int
672i915_seqno_passed(uint32_t seq1, uint32_t seq2)
673{
674 return (int32_t)(seq1 - seq2) >= 0;
675}
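
The unsigned subtraction plus signed cast above is what makes the comparison safe across 32-bit wraparound of the sequence counter. A small stand-alone sketch (a hypothetical helper mirroring the same idiom, not part of the patch) shows the behaviour:

#include <assert.h>
#include <stdint.h>

/* "a passed b" iff a is at most 2^31 - 1 steps ahead of b, even when
 * the counter has wrapped around zero. */
static int seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	assert(seqno_passed(5, 3));		/* ordinary case */
	assert(!seqno_passed(3, 5));
	assert(seqno_passed(2, 0xfffffffe));	/* across wraparound */
	assert(!seqno_passed(0xfffffffe, 2));
	return 0;
}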
676
677uint32_t
678i915_get_gem_seqno(struct drm_device *dev)
679{
680 drm_i915_private_t *dev_priv = dev->dev_private;
681
682 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
683}
684
685/**
686 * This function clears the request list as sequence numbers are passed.
687 */
688void
689i915_gem_retire_requests(struct drm_device *dev)
690{
691 drm_i915_private_t *dev_priv = dev->dev_private;
692 uint32_t seqno;
693
694 seqno = i915_get_gem_seqno(dev);
695
696 while (!list_empty(&dev_priv->mm.request_list)) {
697 struct drm_i915_gem_request *request;
698 uint32_t retiring_seqno;
699
700 request = list_first_entry(&dev_priv->mm.request_list,
701 struct drm_i915_gem_request,
702 list);
703 retiring_seqno = request->seqno;
704
705 if (i915_seqno_passed(seqno, retiring_seqno) ||
706 dev_priv->mm.wedged) {
707 i915_gem_retire_request(dev, request);
708
709 list_del(&request->list);
710 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
711 } else
712 break;
713 }
714}
715
716void
717i915_gem_retire_work_handler(struct work_struct *work)
718{
719 drm_i915_private_t *dev_priv;
720 struct drm_device *dev;
721
722 dev_priv = container_of(work, drm_i915_private_t,
723 mm.retire_work.work);
724 dev = dev_priv->dev;
725
726 mutex_lock(&dev->struct_mutex);
727 i915_gem_retire_requests(dev);
728 if (!list_empty(&dev_priv->mm.request_list))
729 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
730 mutex_unlock(&dev->struct_mutex);
731}
732
733/**
734 * Waits for a sequence number to be signaled, and cleans up the
735 * request and object lists appropriately for that event.
736 */
737int
738i915_wait_request(struct drm_device *dev, uint32_t seqno)
739{
740 drm_i915_private_t *dev_priv = dev->dev_private;
741 int ret = 0;
742
743 BUG_ON(seqno == 0);
744
745 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
746 dev_priv->mm.waiting_gem_seqno = seqno;
747 i915_user_irq_get(dev);
748 ret = wait_event_interruptible(dev_priv->irq_queue,
749 i915_seqno_passed(i915_get_gem_seqno(dev),
750 seqno) ||
751 dev_priv->mm.wedged);
752 i915_user_irq_put(dev);
753 dev_priv->mm.waiting_gem_seqno = 0;
754 }
755 if (dev_priv->mm.wedged)
756 ret = -EIO;
757
758 if (ret && ret != -ERESTARTSYS)
759 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
760 __func__, ret, seqno, i915_get_gem_seqno(dev));
761
762 /* Directly dispatch request retiring. While we have the work queue
763 * to handle this, the waiter on a request often wants an associated
764 * buffer to have made it to the inactive list, and we would need
765 * a separate wait queue to handle that.
766 */
767 if (ret == 0)
768 i915_gem_retire_requests(dev);
769
770 return ret;
771}
772
773static void
774i915_gem_flush(struct drm_device *dev,
775 uint32_t invalidate_domains,
776 uint32_t flush_domains)
777{
778 drm_i915_private_t *dev_priv = dev->dev_private;
779 uint32_t cmd;
780 RING_LOCALS;
781
782#if WATCH_EXEC
783 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
784 invalidate_domains, flush_domains);
785#endif
786
787 if (flush_domains & I915_GEM_DOMAIN_CPU)
788 drm_agp_chipset_flush(dev);
789
790 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
791 I915_GEM_DOMAIN_GTT)) {
792 /*
793 * read/write caches:
794 *
795 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
796 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
797 * also flushed at 2d versus 3d pipeline switches.
798 *
799 * read-only caches:
800 *
801 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
802 * MI_READ_FLUSH is set, and is always flushed on 965.
803 *
804 * I915_GEM_DOMAIN_COMMAND may not exist?
805 *
806 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
807 * invalidated when MI_EXE_FLUSH is set.
808 *
809 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
810 * invalidated with every MI_FLUSH.
811 *
812 * TLBs:
813 *
814 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
 815	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
816 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
817 * are flushed at any MI_FLUSH.
818 */
819
820 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
821 if ((invalidate_domains|flush_domains) &
822 I915_GEM_DOMAIN_RENDER)
823 cmd &= ~MI_NO_WRITE_FLUSH;
824 if (!IS_I965G(dev)) {
825 /*
826 * On the 965, the sampler cache always gets flushed
827 * and this bit is reserved.
828 */
829 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
830 cmd |= MI_READ_FLUSH;
831 }
832 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
833 cmd |= MI_EXE_FLUSH;
834
835#if WATCH_EXEC
836 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
837#endif
838 BEGIN_LP_RING(2);
839 OUT_RING(cmd);
840 OUT_RING(0); /* noop */
841 ADVANCE_LP_RING();
842 }
843}
844
845/**
846 * Ensures that all rendering to the object has completed and the object is
847 * safe to unbind from the GTT or access from the CPU.
848 */
849static int
850i915_gem_object_wait_rendering(struct drm_gem_object *obj)
851{
852 struct drm_device *dev = obj->dev;
853 struct drm_i915_gem_object *obj_priv = obj->driver_private;
854 int ret;
855
856 /* If there are writes queued to the buffer, flush and
857 * create a new seqno to wait for.
858 */
859 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
860 uint32_t write_domain = obj->write_domain;
861#if WATCH_BUF
862 DRM_INFO("%s: flushing object %p from write domain %08x\n",
863 __func__, obj, write_domain);
864#endif
865 i915_gem_flush(dev, 0, write_domain);
866
867 i915_gem_object_move_to_active(obj);
868 obj_priv->last_rendering_seqno = i915_add_request(dev,
869 write_domain);
870 BUG_ON(obj_priv->last_rendering_seqno == 0);
871#if WATCH_LRU
872 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
873#endif
874 }
875
876 /* If there is rendering queued on the buffer being evicted, wait for
877 * it.
878 */
879 if (obj_priv->active) {
880#if WATCH_BUF
881 DRM_INFO("%s: object %p wait for seqno %08x\n",
882 __func__, obj, obj_priv->last_rendering_seqno);
883#endif
884 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
885 if (ret != 0)
886 return ret;
887 }
888
889 return 0;
890}
891
892/**
893 * Unbinds an object from the GTT aperture.
894 */
895static int
896i915_gem_object_unbind(struct drm_gem_object *obj)
897{
898 struct drm_device *dev = obj->dev;
899 struct drm_i915_gem_object *obj_priv = obj->driver_private;
900 int ret = 0;
901
902#if WATCH_BUF
903 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
904 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
905#endif
906 if (obj_priv->gtt_space == NULL)
907 return 0;
908
909 if (obj_priv->pin_count != 0) {
910 DRM_ERROR("Attempting to unbind pinned buffer\n");
911 return -EINVAL;
912 }
913
914 /* Wait for any rendering to complete
915 */
916 ret = i915_gem_object_wait_rendering(obj);
917 if (ret) {
918 DRM_ERROR("wait_rendering failed: %d\n", ret);
919 return ret;
920 }
921
922 /* Move the object to the CPU domain to ensure that
923 * any possible CPU writes while it's not in the GTT
924 * are flushed when we go to remap it. This will
925 * also ensure that all pending GPU writes are finished
926 * before we unbind.
927 */
928 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
929 I915_GEM_DOMAIN_CPU);
930 if (ret) {
931 DRM_ERROR("set_domain failed: %d\n", ret);
932 return ret;
933 }
934
935 if (obj_priv->agp_mem != NULL) {
936 drm_unbind_agp(obj_priv->agp_mem);
937 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
938 obj_priv->agp_mem = NULL;
939 }
940
941 BUG_ON(obj_priv->active);
942
943 i915_gem_object_free_page_list(obj);
944
945 if (obj_priv->gtt_space) {
946 atomic_dec(&dev->gtt_count);
947 atomic_sub(obj->size, &dev->gtt_memory);
948
949 drm_mm_put_block(obj_priv->gtt_space);
950 obj_priv->gtt_space = NULL;
951 }
952
953 /* Remove ourselves from the LRU list if present. */
954 if (!list_empty(&obj_priv->list))
955 list_del_init(&obj_priv->list);
956
957 return 0;
958}
959
960static int
961i915_gem_evict_something(struct drm_device *dev)
962{
963 drm_i915_private_t *dev_priv = dev->dev_private;
964 struct drm_gem_object *obj;
965 struct drm_i915_gem_object *obj_priv;
966 int ret = 0;
967
968 for (;;) {
969 /* If there's an inactive buffer available now, grab it
970 * and be done.
971 */
972 if (!list_empty(&dev_priv->mm.inactive_list)) {
973 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
974 struct drm_i915_gem_object,
975 list);
976 obj = obj_priv->obj;
977 BUG_ON(obj_priv->pin_count != 0);
978#if WATCH_LRU
979 DRM_INFO("%s: evicting %p\n", __func__, obj);
980#endif
981 BUG_ON(obj_priv->active);
982
983 /* Wait on the rendering and unbind the buffer. */
984 ret = i915_gem_object_unbind(obj);
985 break;
986 }
987
988 /* If we didn't get anything, but the ring is still processing
989 * things, wait for one of those things to finish and hopefully
990 * leave us a buffer to evict.
991 */
992 if (!list_empty(&dev_priv->mm.request_list)) {
993 struct drm_i915_gem_request *request;
994
995 request = list_first_entry(&dev_priv->mm.request_list,
996 struct drm_i915_gem_request,
997 list);
998
999 ret = i915_wait_request(dev, request->seqno);
1000 if (ret)
1001 break;
1002
1003 /* if waiting caused an object to become inactive,
1004 * then loop around and wait for it. Otherwise, we
1005 * assume that waiting freed and unbound something,
1006 * so there should now be some space in the GTT
1007 */
1008 if (!list_empty(&dev_priv->mm.inactive_list))
1009 continue;
1010 break;
1011 }
1012
1013 /* If we didn't have anything on the request list but there
1014 * are buffers awaiting a flush, emit one and try again.
1015 * When we wait on it, those buffers waiting for that flush
1016 * will get moved to inactive.
1017 */
1018 if (!list_empty(&dev_priv->mm.flushing_list)) {
1019 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1020 struct drm_i915_gem_object,
1021 list);
1022 obj = obj_priv->obj;
1023
1024 i915_gem_flush(dev,
1025 obj->write_domain,
1026 obj->write_domain);
1027 i915_add_request(dev, obj->write_domain);
1028
1029 obj = NULL;
1030 continue;
1031 }
1032
1033 DRM_ERROR("inactive empty %d request empty %d "
1034 "flushing empty %d\n",
1035 list_empty(&dev_priv->mm.inactive_list),
1036 list_empty(&dev_priv->mm.request_list),
1037 list_empty(&dev_priv->mm.flushing_list));
1038 /* If we didn't do any of the above, there's nothing to be done
1039 * and we just can't fit it in.
1040 */
1041 return -ENOMEM;
1042 }
1043 return ret;
1044}
1045
1046static int
1047i915_gem_object_get_page_list(struct drm_gem_object *obj)
1048{
1049 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1050 int page_count, i;
1051 struct address_space *mapping;
1052 struct inode *inode;
1053 struct page *page;
1054 int ret;
1055
1056 if (obj_priv->page_list)
1057 return 0;
1058
1059 /* Get the list of pages out of our struct file. They'll be pinned
1060 * at this point until we release them.
1061 */
1062 page_count = obj->size / PAGE_SIZE;
1063 BUG_ON(obj_priv->page_list != NULL);
1064 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1065 DRM_MEM_DRIVER);
1066 if (obj_priv->page_list == NULL) {
1067 DRM_ERROR("Faled to allocate page list\n");
1068 return -ENOMEM;
1069 }
1070
1071 inode = obj->filp->f_path.dentry->d_inode;
1072 mapping = inode->i_mapping;
1073 for (i = 0; i < page_count; i++) {
1074 page = read_mapping_page(mapping, i, NULL);
1075 if (IS_ERR(page)) {
1076 ret = PTR_ERR(page);
1077 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1078 i915_gem_object_free_page_list(obj);
1079 return ret;
1080 }
1081 obj_priv->page_list[i] = page;
1082 }
1083 return 0;
1084}
1085
1086/**
1087 * Finds free space in the GTT aperture and binds the object there.
1088 */
1089static int
1090i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1091{
1092 struct drm_device *dev = obj->dev;
1093 drm_i915_private_t *dev_priv = dev->dev_private;
1094 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1095 struct drm_mm_node *free_space;
1096 int page_count, ret;
1097
1098 if (alignment == 0)
1099 alignment = PAGE_SIZE;
1100 if (alignment & (PAGE_SIZE - 1)) {
1101 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1102 return -EINVAL;
1103 }
1104
1105 search_free:
1106 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1107 obj->size, alignment, 0);
1108 if (free_space != NULL) {
1109 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
1110 alignment);
1111 if (obj_priv->gtt_space != NULL) {
1112 obj_priv->gtt_space->private = obj;
1113 obj_priv->gtt_offset = obj_priv->gtt_space->start;
1114 }
1115 }
1116 if (obj_priv->gtt_space == NULL) {
1117 /* If the gtt is empty and we're still having trouble
1118 * fitting our object in, we're out of memory.
1119 */
1120#if WATCH_LRU
1121 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1122#endif
1123 if (list_empty(&dev_priv->mm.inactive_list) &&
1124 list_empty(&dev_priv->mm.flushing_list) &&
1125 list_empty(&dev_priv->mm.active_list)) {
1126 DRM_ERROR("GTT full, but LRU list empty\n");
1127 return -ENOMEM;
1128 }
1129
1130 ret = i915_gem_evict_something(dev);
1131 if (ret != 0) {
1132 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1133 return ret;
1134 }
1135 goto search_free;
1136 }
1137
1138#if WATCH_BUF
1139 DRM_INFO("Binding object of size %d at 0x%08x\n",
1140 obj->size, obj_priv->gtt_offset);
1141#endif
1142 ret = i915_gem_object_get_page_list(obj);
1143 if (ret) {
1144 drm_mm_put_block(obj_priv->gtt_space);
1145 obj_priv->gtt_space = NULL;
1146 return ret;
1147 }
1148
1149 page_count = obj->size / PAGE_SIZE;
1150 /* Create an AGP memory structure pointing at our pages, and bind it
1151 * into the GTT.
1152 */
1153 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1154 obj_priv->page_list,
1155 page_count,
1156 obj_priv->gtt_offset);
1157 if (obj_priv->agp_mem == NULL) {
1158 i915_gem_object_free_page_list(obj);
1159 drm_mm_put_block(obj_priv->gtt_space);
1160 obj_priv->gtt_space = NULL;
1161 return -ENOMEM;
1162 }
1163 atomic_inc(&dev->gtt_count);
1164 atomic_add(obj->size, &dev->gtt_memory);
1165
1166 /* Assert that the object is not currently in any GPU domain. As it
1167 * wasn't in the GTT, there shouldn't be any way it could have been in
1168 * a GPU cache
1169 */
1170 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1171 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1172
1173 return 0;
1174}
1175
1176void
1177i915_gem_clflush_object(struct drm_gem_object *obj)
1178{
1179 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1180
1181 /* If we don't have a page list set up, then we're not pinned
1182 * to GPU, and we can ignore the cache flush because it'll happen
1183 * again at bind time.
1184 */
1185 if (obj_priv->page_list == NULL)
1186 return;
1187
1188 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1189}
1190
1191/*
1192 * Set the next domain for the specified object. This
1193 * may not actually perform the necessary flushing/invaliding though,
1194 * as that may want to be batched with other set_domain operations
1195 *
1196 * This is (we hope) the only really tricky part of gem. The goal
1197 * is fairly simple -- track which caches hold bits of the object
1198 * and make sure they remain coherent. A few concrete examples may
1199 * help to explain how it works. For shorthand, we use the notation
 1200	 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate a
 1201	 * pair of read and write domain masks.
1202 *
1203 * Case 1: the batch buffer
1204 *
1205 * 1. Allocated
1206 * 2. Written by CPU
1207 * 3. Mapped to GTT
1208 * 4. Read by GPU
1209 * 5. Unmapped from GTT
1210 * 6. Freed
1211 *
1212 * Let's take these a step at a time
1213 *
1214 * 1. Allocated
1215 * Pages allocated from the kernel may still have
1216 * cache contents, so we set them to (CPU, CPU) always.
1217 * 2. Written by CPU (using pwrite)
1218 * The pwrite function calls set_domain (CPU, CPU) and
1219 * this function does nothing (as nothing changes)
1220 * 3. Mapped by GTT
1221 * This function asserts that the object is not
1222 * currently in any GPU-based read or write domains
1223 * 4. Read by GPU
1224 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
1225 * As write_domain is zero, this function adds in the
1226 * current read domains (CPU+COMMAND, 0).
1227 * flush_domains is set to CPU.
1228 * invalidate_domains is set to COMMAND
1229 * clflush is run to get data out of the CPU caches
1230 * then i915_dev_set_domain calls i915_gem_flush to
1231 * emit an MI_FLUSH and drm_agp_chipset_flush
1232 * 5. Unmapped from GTT
1233 * i915_gem_object_unbind calls set_domain (CPU, CPU)
1234 * flush_domains and invalidate_domains end up both zero
1235 * so no flushing/invalidating happens
1236 * 6. Freed
1237 * yay, done
1238 *
1239 * Case 2: The shared render buffer
1240 *
1241 * 1. Allocated
1242 * 2. Mapped to GTT
1243 * 3. Read/written by GPU
1244 * 4. set_domain to (CPU,CPU)
1245 * 5. Read/written by CPU
1246 * 6. Read/written by GPU
1247 *
1248 * 1. Allocated
1249 * Same as last example, (CPU, CPU)
1250 * 2. Mapped to GTT
1251 * Nothing changes (assertions find that it is not in the GPU)
1252 * 3. Read/written by GPU
1253 * execbuffer calls set_domain (RENDER, RENDER)
1254 * flush_domains gets CPU
1255 * invalidate_domains gets GPU
1256 * clflush (obj)
1257 * MI_FLUSH and drm_agp_chipset_flush
1258 * 4. set_domain (CPU, CPU)
1259 * flush_domains gets GPU
1260 * invalidate_domains gets CPU
1261 * wait_rendering (obj) to make sure all drawing is complete.
1262 * This will include an MI_FLUSH to get the data from GPU
1263 * to memory
1264 * clflush (obj) to invalidate the CPU cache
1265 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1266 * 5. Read/written by CPU
1267 * cache lines are loaded and dirtied
 1268	 * 6. Read/written by GPU
1269 * Same as last GPU access
1270 *
1271 * Case 3: The constant buffer
1272 *
1273 * 1. Allocated
1274 * 2. Written by CPU
1275 * 3. Read by GPU
1276 * 4. Updated (written) by CPU again
1277 * 5. Read by GPU
1278 *
1279 * 1. Allocated
1280 * (CPU, CPU)
1281 * 2. Written by CPU
1282 * (CPU, CPU)
1283 * 3. Read by GPU
1284 * (CPU+RENDER, 0)
1285 * flush_domains = CPU
1286 * invalidate_domains = RENDER
1287 * clflush (obj)
1288 * MI_FLUSH
1289 * drm_agp_chipset_flush
1290 * 4. Updated (written) by CPU again
1291 * (CPU, CPU)
1292 * flush_domains = 0 (no previous write domain)
1293 * invalidate_domains = 0 (no new read domains)
1294 * 5. Read by GPU
1295 * (CPU+RENDER, 0)
1296 * flush_domains = CPU
1297 * invalidate_domains = RENDER
1298 * clflush (obj)
1299 * MI_FLUSH
1300 * drm_agp_chipset_flush
1301 */
1302static int
1303i915_gem_object_set_domain(struct drm_gem_object *obj,
1304 uint32_t read_domains,
1305 uint32_t write_domain)
1306{
1307 struct drm_device *dev = obj->dev;
1308 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1309 uint32_t invalidate_domains = 0;
1310 uint32_t flush_domains = 0;
1311 int ret;
1312
1313#if WATCH_BUF
1314 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1315 __func__, obj,
1316 obj->read_domains, read_domains,
1317 obj->write_domain, write_domain);
1318#endif
1319 /*
1320 * If the object isn't moving to a new write domain,
1321 * let the object stay in multiple read domains
1322 */
1323 if (write_domain == 0)
1324 read_domains |= obj->read_domains;
1325 else
1326 obj_priv->dirty = 1;
1327
1328 /*
1329 * Flush the current write domain if
1330 * the new read domains don't match. Invalidate
1331 * any read domains which differ from the old
1332 * write domain
1333 */
1334 if (obj->write_domain && obj->write_domain != read_domains) {
1335 flush_domains |= obj->write_domain;
1336 invalidate_domains |= read_domains & ~obj->write_domain;
1337 }
1338 /*
1339 * Invalidate any read caches which may have
1340 * stale data. That is, any new read domains.
1341 */
1342 invalidate_domains |= read_domains & ~obj->read_domains;
1343 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1344#if WATCH_BUF
1345 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1346 __func__, flush_domains, invalidate_domains);
1347#endif
1348 /*
 1349	 * If we're invalidating the CPU cache and flushing a GPU cache,
1350 * then pause for rendering so that the GPU caches will be
1351 * flushed before the cpu cache is invalidated
1352 */
1353 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1354 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1355 I915_GEM_DOMAIN_GTT))) {
1356 ret = i915_gem_object_wait_rendering(obj);
1357 if (ret)
1358 return ret;
1359 }
1360 i915_gem_clflush_object(obj);
1361 }
1362
1363 if ((write_domain | flush_domains) != 0)
1364 obj->write_domain = write_domain;
1365
1366 /* If we're invalidating the CPU domain, clear the per-page CPU
1367 * domain list as well.
1368 */
1369 if (obj_priv->page_cpu_valid != NULL &&
1370 (write_domain != 0 ||
1371 read_domains & I915_GEM_DOMAIN_CPU)) {
1372 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1373 DRM_MEM_DRIVER);
1374 obj_priv->page_cpu_valid = NULL;
1375 }
1376 obj->read_domains = read_domains;
1377
1378 dev->invalidate_domains |= invalidate_domains;
1379 dev->flush_domains |= flush_domains;
1380#if WATCH_BUF
1381 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1382 __func__,
1383 obj->read_domains, obj->write_domain,
1384 dev->invalidate_domains, dev->flush_domains);
1385#endif
1386 return 0;
1387}
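
To make the bookkeeping in the comment above concrete, here is a stand-alone sketch of the mask arithmetic only (hypothetical, with locally defined domain bits whose values match the new i915_drm.h; it does none of the actual flushing) worked through for Case 1, step 4:

#include <assert.h>
#include <stdint.h>

#define GEM_DOMAIN_CPU     0x00000001u
#define GEM_DOMAIN_COMMAND 0x00000008u

int main(void)
{
	/* Batch buffer just written by the CPU: (CPU, CPU). */
	uint32_t obj_read = GEM_DOMAIN_CPU, obj_write = GEM_DOMAIN_CPU;

	/* execbuffer asks for (COMMAND, 0); a zero write domain keeps the
	 * existing read domains, as in the function above. */
	uint32_t read_domains = GEM_DOMAIN_COMMAND | obj_read;

	uint32_t flush = 0, invalidate = 0;
	if (obj_write && obj_write != read_domains) {
		flush |= obj_write;
		invalidate |= read_domains & ~obj_write;
	}
	invalidate |= read_domains & ~obj_read;

	/* flush = CPU (clflush + chipset flush), invalidate = COMMAND,
	 * exactly as described for Case 1, step 4. */
	assert(flush == GEM_DOMAIN_CPU);
	assert(invalidate == GEM_DOMAIN_COMMAND);
	return 0;
}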
1388
1389/**
1390 * Set the read/write domain on a range of the object.
1391 *
1392 * Currently only implemented for CPU reads, otherwise drops to normal
1393 * i915_gem_object_set_domain().
1394 */
1395static int
1396i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1397 uint64_t offset,
1398 uint64_t size,
1399 uint32_t read_domains,
1400 uint32_t write_domain)
1401{
1402 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1403 int ret, i;
1404
1405 if (obj->read_domains & I915_GEM_DOMAIN_CPU)
1406 return 0;
1407
1408 if (read_domains != I915_GEM_DOMAIN_CPU ||
1409 write_domain != 0)
1410 return i915_gem_object_set_domain(obj,
1411 read_domains, write_domain);
1412
1413 /* Wait on any GPU rendering to the object to be flushed. */
1414 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
1415 ret = i915_gem_object_wait_rendering(obj);
1416 if (ret)
1417 return ret;
1418 }
1419
1420 if (obj_priv->page_cpu_valid == NULL) {
1421 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1422 DRM_MEM_DRIVER);
1423 }
1424
1425 /* Flush the cache on any pages that are still invalid from the CPU's
1426 * perspective.
1427 */
1428 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
1429 if (obj_priv->page_cpu_valid[i])
1430 continue;
1431
1432 drm_clflush_pages(obj_priv->page_list + i, 1);
1433
1434 obj_priv->page_cpu_valid[i] = 1;
1435 }
1436
1437 return 0;
1438}
1439
1440/**
1441 * Once all of the objects have been set in the proper domain,
1442 * perform the necessary flush and invalidate operations.
1443 *
1444 * Returns the write domains flushed, for use in flush tracking.
1445 */
1446static uint32_t
1447i915_gem_dev_set_domain(struct drm_device *dev)
1448{
1449 uint32_t flush_domains = dev->flush_domains;
1450
1451 /*
1452 * Now that all the buffers are synced to the proper domains,
1453 * flush and invalidate the collected domains
1454 */
1455 if (dev->invalidate_domains | dev->flush_domains) {
1456#if WATCH_EXEC
1457 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1458 __func__,
1459 dev->invalidate_domains,
1460 dev->flush_domains);
1461#endif
1462 i915_gem_flush(dev,
1463 dev->invalidate_domains,
1464 dev->flush_domains);
1465 dev->invalidate_domains = 0;
1466 dev->flush_domains = 0;
1467 }
1468
1469 return flush_domains;
1470}
1471
1472/**
1473 * Pin an object to the GTT and evaluate the relocations landing in it.
1474 */
1475static int
1476i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1477 struct drm_file *file_priv,
1478 struct drm_i915_gem_exec_object *entry)
1479{
1480 struct drm_device *dev = obj->dev;
1481 struct drm_i915_gem_relocation_entry reloc;
1482 struct drm_i915_gem_relocation_entry __user *relocs;
1483 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1484 int i, ret;
1485 uint32_t last_reloc_offset = -1;
1486 void *reloc_page = NULL;
1487
1488 /* Choose the GTT offset for our buffer and put it there. */
1489 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1490 if (ret)
1491 return ret;
1492
1493 entry->offset = obj_priv->gtt_offset;
1494
1495 relocs = (struct drm_i915_gem_relocation_entry __user *)
1496 (uintptr_t) entry->relocs_ptr;
1497 /* Apply the relocations, using the GTT aperture to avoid cache
1498 * flushing requirements.
1499 */
1500 for (i = 0; i < entry->relocation_count; i++) {
1501 struct drm_gem_object *target_obj;
1502 struct drm_i915_gem_object *target_obj_priv;
1503 uint32_t reloc_val, reloc_offset, *reloc_entry;
1504 int ret;
1505
1506 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1507 if (ret != 0) {
1508 i915_gem_object_unpin(obj);
1509 return ret;
1510 }
1511
1512 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1513 reloc.target_handle);
1514 if (target_obj == NULL) {
1515 i915_gem_object_unpin(obj);
1516 return -EBADF;
1517 }
1518 target_obj_priv = target_obj->driver_private;
1519
1520 /* The target buffer should have appeared before us in the
1521 * exec_object list, so it should have a GTT space bound by now.
1522 */
1523 if (target_obj_priv->gtt_space == NULL) {
1524 DRM_ERROR("No GTT space found for object %d\n",
1525 reloc.target_handle);
1526 drm_gem_object_unreference(target_obj);
1527 i915_gem_object_unpin(obj);
1528 return -EINVAL;
1529 }
1530
1531 if (reloc.offset > obj->size - 4) {
1532 DRM_ERROR("Relocation beyond object bounds: "
1533 "obj %p target %d offset %d size %d.\n",
1534 obj, reloc.target_handle,
1535 (int) reloc.offset, (int) obj->size);
1536 drm_gem_object_unreference(target_obj);
1537 i915_gem_object_unpin(obj);
1538 return -EINVAL;
1539 }
1540 if (reloc.offset & 3) {
1541 DRM_ERROR("Relocation not 4-byte aligned: "
1542 "obj %p target %d offset %d.\n",
1543 obj, reloc.target_handle,
1544 (int) reloc.offset);
1545 drm_gem_object_unreference(target_obj);
1546 i915_gem_object_unpin(obj);
1547 return -EINVAL;
1548 }
1549
1550 if (reloc.write_domain && target_obj->pending_write_domain &&
1551 reloc.write_domain != target_obj->pending_write_domain) {
1552 DRM_ERROR("Write domain conflict: "
1553 "obj %p target %d offset %d "
1554 "new %08x old %08x\n",
1555 obj, reloc.target_handle,
1556 (int) reloc.offset,
1557 reloc.write_domain,
1558 target_obj->pending_write_domain);
1559 drm_gem_object_unreference(target_obj);
1560 i915_gem_object_unpin(obj);
1561 return -EINVAL;
1562 }
1563
1564#if WATCH_RELOC
1565 DRM_INFO("%s: obj %p offset %08x target %d "
1566 "read %08x write %08x gtt %08x "
1567 "presumed %08x delta %08x\n",
1568 __func__,
1569 obj,
1570 (int) reloc.offset,
1571 (int) reloc.target_handle,
1572 (int) reloc.read_domains,
1573 (int) reloc.write_domain,
1574 (int) target_obj_priv->gtt_offset,
1575 (int) reloc.presumed_offset,
1576 reloc.delta);
1577#endif
1578
1579 target_obj->pending_read_domains |= reloc.read_domains;
1580 target_obj->pending_write_domain |= reloc.write_domain;
1581
1582 /* If the relocation already has the right value in it, no
1583 * more work needs to be done.
1584 */
1585 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1586 drm_gem_object_unreference(target_obj);
1587 continue;
1588 }
1589
1590 /* Now that we're going to actually write some data in,
1591 * make sure that any rendering using this buffer's contents
1592 * is completed.
1593 */
1594 i915_gem_object_wait_rendering(obj);
1595
1596 /* As we're writing through the gtt, flush
1597 * any CPU writes before we write the relocations
1598 */
1599 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1600 i915_gem_clflush_object(obj);
1601 drm_agp_chipset_flush(dev);
1602 obj->write_domain = 0;
1603 }
1604
1605 /* Map the page containing the relocation we're going to
1606 * perform.
1607 */
1608 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1609 if (reloc_page == NULL ||
1610 (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
1611 (reloc_offset & ~(PAGE_SIZE - 1))) {
1612 if (reloc_page != NULL)
1613 iounmap(reloc_page);
1614
1615 reloc_page = ioremap(dev->agp->base +
1616 (reloc_offset & ~(PAGE_SIZE - 1)),
1617 PAGE_SIZE);
1618 last_reloc_offset = reloc_offset;
1619 if (reloc_page == NULL) {
1620 drm_gem_object_unreference(target_obj);
1621 i915_gem_object_unpin(obj);
1622 return -ENOMEM;
1623 }
1624 }
1625
1626 reloc_entry = (uint32_t *)((char *)reloc_page +
1627 (reloc_offset & (PAGE_SIZE - 1)));
1628 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1629
1630#if WATCH_BUF
1631 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1632 obj, (unsigned int) reloc.offset,
1633 readl(reloc_entry), reloc_val);
1634#endif
1635 writel(reloc_val, reloc_entry);
1636
1637 /* Write the updated presumed offset for this entry back out
1638 * to the user.
1639 */
1640 reloc.presumed_offset = target_obj_priv->gtt_offset;
1641 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1642 if (ret != 0) {
1643 drm_gem_object_unreference(target_obj);
1644 i915_gem_object_unpin(obj);
1645 return ret;
1646 }
1647
1648 drm_gem_object_unreference(target_obj);
1649 }
1650
1651 if (reloc_page != NULL)
1652 iounmap(reloc_page);
1653
1654#if WATCH_BUF
1655 if (0)
1656 i915_gem_dump_object(obj, 128, __func__, ~0);
1657#endif
1658 return 0;
1659}
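
A relocation entry boils down to "store (target's final GTT offset + delta) at (this object's GTT offset + offset), and remember the value in presumed_offset so user space can skip it next time". A minimal stand-alone sketch of that arithmetic (hypothetical structures, patching a CPU-visible array instead of the ioremap()'d GTT aperture used above):

#include <assert.h>
#include <stdint.h>

struct fake_reloc {
	uint32_t offset;           /* byte offset to patch inside the object */
	uint32_t delta;            /* constant added to the target's address */
	uint32_t presumed_offset;  /* what user space guessed last time */
};

static void apply_reloc(uint32_t *obj_copy, uint32_t target_gtt_offset,
			struct fake_reloc *r)
{
	if (target_gtt_offset == r->presumed_offset)
		return;				/* already correct, skip the write */

	obj_copy[r->offset / 4] = target_gtt_offset + r->delta;
	r->presumed_offset = target_gtt_offset;	/* copied back to user space */
}

int main(void)
{
	uint32_t batch[64] = { 0 };
	struct fake_reloc r = { .offset = 16, .delta = 0x100, .presumed_offset = 0 };

	apply_reloc(batch, 0x20000, &r);
	assert(batch[4] == 0x20100);
	assert(r.presumed_offset == 0x20000);
	return 0;
}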
1660
1661/** Dispatch a batchbuffer to the ring
1662 */
1663static int
1664i915_dispatch_gem_execbuffer(struct drm_device *dev,
1665 struct drm_i915_gem_execbuffer *exec,
1666 uint64_t exec_offset)
1667{
1668 drm_i915_private_t *dev_priv = dev->dev_private;
1669 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1670 (uintptr_t) exec->cliprects_ptr;
1671 int nbox = exec->num_cliprects;
1672 int i = 0, count;
1673 uint32_t exec_start, exec_len;
1674 RING_LOCALS;
1675
1676 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1677 exec_len = (uint32_t) exec->batch_len;
1678
1679 if ((exec_start | exec_len) & 0x7) {
1680 DRM_ERROR("alignment\n");
1681 return -EINVAL;
1682 }
1683
1684 if (!exec_start)
1685 return -EINVAL;
1686
1687 count = nbox ? nbox : 1;
1688
1689 for (i = 0; i < count; i++) {
1690 if (i < nbox) {
1691 int ret = i915_emit_box(dev, boxes, i,
1692 exec->DR1, exec->DR4);
1693 if (ret)
1694 return ret;
1695 }
1696
1697 if (IS_I830(dev) || IS_845G(dev)) {
1698 BEGIN_LP_RING(4);
1699 OUT_RING(MI_BATCH_BUFFER);
1700 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1701 OUT_RING(exec_start + exec_len - 4);
1702 OUT_RING(0);
1703 ADVANCE_LP_RING();
1704 } else {
1705 BEGIN_LP_RING(2);
1706 if (IS_I965G(dev)) {
1707 OUT_RING(MI_BATCH_BUFFER_START |
1708 (2 << 6) |
1709 MI_BATCH_NON_SECURE_I965);
1710 OUT_RING(exec_start);
1711 } else {
1712 OUT_RING(MI_BATCH_BUFFER_START |
1713 (2 << 6));
1714 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1715 }
1716 ADVANCE_LP_RING();
1717 }
1718 }
1719
1720 /* XXX breadcrumb */
1721 return 0;
1722}
1723
1724/* Throttle our rendering by waiting until the ring has completed our requests
1725 * emitted over 20 msec ago.
1726 *
1727 * This should get us reasonable parallelism between CPU and GPU but also
1728 * relatively low latency when blocking on a particular request to finish.
1729 */
1730static int
1731i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1732{
1733 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1734 int ret = 0;
1735 uint32_t seqno;
1736
1737 mutex_lock(&dev->struct_mutex);
1738 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1739 i915_file_priv->mm.last_gem_throttle_seqno =
1740 i915_file_priv->mm.last_gem_seqno;
1741 if (seqno)
1742 ret = i915_wait_request(dev, seqno);
1743 mutex_unlock(&dev->struct_mutex);
1744 return ret;
1745}
1746
1747int
1748i915_gem_execbuffer(struct drm_device *dev, void *data,
1749 struct drm_file *file_priv)
1750{
1751 drm_i915_private_t *dev_priv = dev->dev_private;
1752 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1753 struct drm_i915_gem_execbuffer *args = data;
1754 struct drm_i915_gem_exec_object *exec_list = NULL;
1755 struct drm_gem_object **object_list = NULL;
1756 struct drm_gem_object *batch_obj;
1757 int ret, i, pinned = 0;
1758 uint64_t exec_offset;
1759 uint32_t seqno, flush_domains;
1760
1761#if WATCH_EXEC
1762 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1763 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1764#endif
1765
1766 /* Copy in the exec list from userland */
1767 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1768 DRM_MEM_DRIVER);
1769 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1770 DRM_MEM_DRIVER);
1771 if (exec_list == NULL || object_list == NULL) {
1772 DRM_ERROR("Failed to allocate exec or object list "
1773 "for %d buffers\n",
1774 args->buffer_count);
1775 ret = -ENOMEM;
1776 goto pre_mutex_err;
1777 }
1778 ret = copy_from_user(exec_list,
1779 (struct drm_i915_relocation_entry __user *)
1780 (uintptr_t) args->buffers_ptr,
1781 sizeof(*exec_list) * args->buffer_count);
1782 if (ret != 0) {
1783 DRM_ERROR("copy %d exec entries failed %d\n",
1784 args->buffer_count, ret);
1785 goto pre_mutex_err;
1786 }
1787
1788 mutex_lock(&dev->struct_mutex);
1789
1790 i915_verify_inactive(dev, __FILE__, __LINE__);
1791
1792 if (dev_priv->mm.wedged) {
1793 DRM_ERROR("Execbuf while wedged\n");
1794 mutex_unlock(&dev->struct_mutex);
1795 return -EIO;
1796 }
1797
1798 if (dev_priv->mm.suspended) {
1799 DRM_ERROR("Execbuf while VT-switched.\n");
1800 mutex_unlock(&dev->struct_mutex);
1801 return -EBUSY;
1802 }
1803
 1804	/* Zero the global flush/invalidate flags. These
1805 * will be modified as each object is bound to the
1806 * gtt
1807 */
1808 dev->invalidate_domains = 0;
1809 dev->flush_domains = 0;
1810
1811 /* Look up object handles and perform the relocations */
1812 for (i = 0; i < args->buffer_count; i++) {
1813 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1814 exec_list[i].handle);
1815 if (object_list[i] == NULL) {
1816 DRM_ERROR("Invalid object handle %d at index %d\n",
1817 exec_list[i].handle, i);
1818 ret = -EBADF;
1819 goto err;
1820 }
1821
1822 object_list[i]->pending_read_domains = 0;
1823 object_list[i]->pending_write_domain = 0;
1824 ret = i915_gem_object_pin_and_relocate(object_list[i],
1825 file_priv,
1826 &exec_list[i]);
1827 if (ret) {
1828 DRM_ERROR("object bind and relocate failed %d\n", ret);
1829 goto err;
1830 }
1831 pinned = i + 1;
1832 }
1833
1834 /* Set the pending read domains for the batch buffer to COMMAND */
1835 batch_obj = object_list[args->buffer_count-1];
1836 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1837 batch_obj->pending_write_domain = 0;
1838
1839 i915_verify_inactive(dev, __FILE__, __LINE__);
1840
1841 for (i = 0; i < args->buffer_count; i++) {
1842 struct drm_gem_object *obj = object_list[i];
1843 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1844
1845 if (obj_priv->gtt_space == NULL) {
 1846		/* We evicted the buffer in the process of validating
 1847		 * our set of buffers. We could try to recover by
 1848		 * kicking everything out and trying again from
1849 * the start.
1850 */
1851 ret = -ENOMEM;
1852 goto err;
1853 }
1854
1855 /* make sure all previous memory operations have passed */
1856 ret = i915_gem_object_set_domain(obj,
1857 obj->pending_read_domains,
1858 obj->pending_write_domain);
1859 if (ret)
1860 goto err;
1861 }
1862
1863 i915_verify_inactive(dev, __FILE__, __LINE__);
1864
1865 /* Flush/invalidate caches and chipset buffer */
1866 flush_domains = i915_gem_dev_set_domain(dev);
1867
1868 i915_verify_inactive(dev, __FILE__, __LINE__);
1869
1870#if WATCH_COHERENCY
1871 for (i = 0; i < args->buffer_count; i++) {
1872 i915_gem_object_check_coherency(object_list[i],
1873 exec_list[i].handle);
1874 }
1875#endif
1876
1877 exec_offset = exec_list[args->buffer_count - 1].offset;
1878
1879#if WATCH_EXEC
1880 i915_gem_dump_object(object_list[args->buffer_count - 1],
1881 args->batch_len,
1882 __func__,
1883 ~0);
1884#endif
1885
1886 (void)i915_add_request(dev, flush_domains);
1887
1888 /* Exec the batchbuffer */
1889 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1890 if (ret) {
1891 DRM_ERROR("dispatch failed %d\n", ret);
1892 goto err;
1893 }
1894
1895 /*
1896 * Ensure that the commands in the batch buffer are
1897 * finished before the interrupt fires
1898 */
1899 flush_domains = i915_retire_commands(dev);
1900
1901 i915_verify_inactive(dev, __FILE__, __LINE__);
1902
1903 /*
1904 * Get a seqno representing the execution of the current buffer,
1905 * which we can wait on. We would like to mitigate these interrupts,
1906 * likely by only creating seqnos occasionally (so that we have
1907 * *some* interrupts representing completion of buffers that we can
1908 * wait on when trying to clear up gtt space).
1909 */
1910 seqno = i915_add_request(dev, flush_domains);
1911 BUG_ON(seqno == 0);
1912 i915_file_priv->mm.last_gem_seqno = seqno;
1913 for (i = 0; i < args->buffer_count; i++) {
1914 struct drm_gem_object *obj = object_list[i];
1915 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1916
1917 i915_gem_object_move_to_active(obj);
1918 obj_priv->last_rendering_seqno = seqno;
1919#if WATCH_LRU
1920 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1921#endif
1922 }
1923#if WATCH_LRU
1924 i915_dump_lru(dev, __func__);
1925#endif
1926
1927 i915_verify_inactive(dev, __FILE__, __LINE__);
1928
1929 /* Copy the new buffer offsets back to the user's exec list. */
1930 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1931 (uintptr_t) args->buffers_ptr,
1932 exec_list,
1933 sizeof(*exec_list) * args->buffer_count);
1934 if (ret)
1935 DRM_ERROR("failed to copy %d exec entries "
1936 "back to user (%d)\n",
1937 args->buffer_count, ret);
1938err:
1939 if (object_list != NULL) {
1940 for (i = 0; i < pinned; i++)
1941 i915_gem_object_unpin(object_list[i]);
1942
1943 for (i = 0; i < args->buffer_count; i++)
1944 drm_gem_object_unreference(object_list[i]);
1945 }
1946 mutex_unlock(&dev->struct_mutex);
1947
1948pre_mutex_err:
1949 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1950 DRM_MEM_DRIVER);
1951 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
1952 DRM_MEM_DRIVER);
1953
1954 return ret;
1955}
1956
1957int
1958i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1959{
1960 struct drm_device *dev = obj->dev;
1961 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1962 int ret;
1963
1964 i915_verify_inactive(dev, __FILE__, __LINE__);
1965 if (obj_priv->gtt_space == NULL) {
1966 ret = i915_gem_object_bind_to_gtt(obj, alignment);
1967 if (ret != 0) {
1968			DRM_ERROR("Failure to bind: %d\n", ret);
1969 return ret;
1970 }
1971 }
1972 obj_priv->pin_count++;
1973
1974 /* If the object is not active and not pending a flush,
1975 * remove it from the inactive list
1976 */
1977 if (obj_priv->pin_count == 1) {
1978 atomic_inc(&dev->pin_count);
1979 atomic_add(obj->size, &dev->pin_memory);
1980 if (!obj_priv->active &&
1981 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
1982 I915_GEM_DOMAIN_GTT)) == 0 &&
1983 !list_empty(&obj_priv->list))
1984 list_del_init(&obj_priv->list);
1985 }
1986 i915_verify_inactive(dev, __FILE__, __LINE__);
1987
1988 return 0;
1989}
1990
1991void
1992i915_gem_object_unpin(struct drm_gem_object *obj)
1993{
1994 struct drm_device *dev = obj->dev;
1995 drm_i915_private_t *dev_priv = dev->dev_private;
1996 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1997
1998 i915_verify_inactive(dev, __FILE__, __LINE__);
1999 obj_priv->pin_count--;
2000 BUG_ON(obj_priv->pin_count < 0);
2001 BUG_ON(obj_priv->gtt_space == NULL);
2002
2003 /* If the object is no longer pinned, and is
2004 * neither active nor being flushed, then stick it on
2005 * the inactive list
2006 */
2007 if (obj_priv->pin_count == 0) {
2008 if (!obj_priv->active &&
2009 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2010 I915_GEM_DOMAIN_GTT)) == 0)
2011 list_move_tail(&obj_priv->list,
2012 &dev_priv->mm.inactive_list);
2013 atomic_dec(&dev->pin_count);
2014 atomic_sub(obj->size, &dev->pin_memory);
2015 }
2016 i915_verify_inactive(dev, __FILE__, __LINE__);
2017}
2018
2019int
2020i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2021 struct drm_file *file_priv)
2022{
2023 struct drm_i915_gem_pin *args = data;
2024 struct drm_gem_object *obj;
2025 struct drm_i915_gem_object *obj_priv;
2026 int ret;
2027
2028 mutex_lock(&dev->struct_mutex);
2029
2030 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2031 if (obj == NULL) {
2032 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2033 args->handle);
2034 mutex_unlock(&dev->struct_mutex);
2035 return -EBADF;
2036 }
2037 obj_priv = obj->driver_private;
2038
2039 ret = i915_gem_object_pin(obj, args->alignment);
2040 if (ret != 0) {
2041 drm_gem_object_unreference(obj);
2042 mutex_unlock(&dev->struct_mutex);
2043 return ret;
2044 }
2045
2046 /* XXX - flush the CPU caches for pinned objects
2047 * as the X server doesn't manage domains yet
2048 */
2049 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2050 i915_gem_clflush_object(obj);
2051 drm_agp_chipset_flush(dev);
2052 obj->write_domain = 0;
2053 }
2054 args->offset = obj_priv->gtt_offset;
2055 drm_gem_object_unreference(obj);
2056 mutex_unlock(&dev->struct_mutex);
2057
2058 return 0;
2059}
2060
2061int
2062i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2063 struct drm_file *file_priv)
2064{
2065 struct drm_i915_gem_pin *args = data;
2066 struct drm_gem_object *obj;
2067
2068 mutex_lock(&dev->struct_mutex);
2069
2070 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2071 if (obj == NULL) {
2072 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2073 args->handle);
2074 mutex_unlock(&dev->struct_mutex);
2075 return -EBADF;
2076 }
2077
2078 i915_gem_object_unpin(obj);
2079
2080 drm_gem_object_unreference(obj);
2081 mutex_unlock(&dev->struct_mutex);
2082 return 0;
2083}
2084
2085int
2086i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2087 struct drm_file *file_priv)
2088{
2089 struct drm_i915_gem_busy *args = data;
2090 struct drm_gem_object *obj;
2091 struct drm_i915_gem_object *obj_priv;
2092
2093 mutex_lock(&dev->struct_mutex);
2094 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2095 if (obj == NULL) {
2096 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2097 args->handle);
2098 mutex_unlock(&dev->struct_mutex);
2099 return -EBADF;
2100 }
2101
2102 obj_priv = obj->driver_private;
2103 args->busy = obj_priv->active;
2104
2105 drm_gem_object_unreference(obj);
2106 mutex_unlock(&dev->struct_mutex);
2107 return 0;
2108}
2109
2110int
2111i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2112 struct drm_file *file_priv)
2113{
2114 return i915_gem_ring_throttle(dev, file_priv);
2115}
2116
2117int i915_gem_init_object(struct drm_gem_object *obj)
2118{
2119 struct drm_i915_gem_object *obj_priv;
2120
2121 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2122 if (obj_priv == NULL)
2123 return -ENOMEM;
2124
2125 /*
2126 * We've just allocated pages from the kernel,
2127 * so they've just been written by the CPU with
2128 * zeros. They'll need to be clflushed before we
2129 * use them with the GPU.
2130 */
2131 obj->write_domain = I915_GEM_DOMAIN_CPU;
2132 obj->read_domains = I915_GEM_DOMAIN_CPU;
2133
2134 obj->driver_private = obj_priv;
2135 obj_priv->obj = obj;
2136 INIT_LIST_HEAD(&obj_priv->list);
2137 return 0;
2138}
2139
2140void i915_gem_free_object(struct drm_gem_object *obj)
2141{
2142 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2143
2144 while (obj_priv->pin_count > 0)
2145 i915_gem_object_unpin(obj);
2146
2147 i915_gem_object_unbind(obj);
2148
2149 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2150 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2151}
2152
2153static int
2154i915_gem_set_domain(struct drm_gem_object *obj,
2155 struct drm_file *file_priv,
2156 uint32_t read_domains,
2157 uint32_t write_domain)
2158{
2159 struct drm_device *dev = obj->dev;
2160 int ret;
2161 uint32_t flush_domains;
2162
2163 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2164
2165 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2166 if (ret)
2167 return ret;
2168 flush_domains = i915_gem_dev_set_domain(obj->dev);
2169
2170 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2171 (void) i915_add_request(dev, flush_domains);
2172
2173 return 0;
2174}
2175
2176/** Unbinds all objects that are on the given buffer list. */
2177static int
2178i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2179{
2180 struct drm_gem_object *obj;
2181 struct drm_i915_gem_object *obj_priv;
2182 int ret;
2183
2184 while (!list_empty(head)) {
2185 obj_priv = list_first_entry(head,
2186 struct drm_i915_gem_object,
2187 list);
2188 obj = obj_priv->obj;
2189
2190 if (obj_priv->pin_count != 0) {
2191 DRM_ERROR("Pinned object in unbind list\n");
2192 mutex_unlock(&dev->struct_mutex);
2193 return -EINVAL;
2194 }
2195
2196 ret = i915_gem_object_unbind(obj);
2197 if (ret != 0) {
2198 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2199 ret);
2200 mutex_unlock(&dev->struct_mutex);
2201 return ret;
2202 }
2203 }
2204
2205
2206 return 0;
2207}
2208
2209static int
2210i915_gem_idle(struct drm_device *dev)
2211{
2212 drm_i915_private_t *dev_priv = dev->dev_private;
2213 uint32_t seqno, cur_seqno, last_seqno;
2214 int stuck, ret;
2215
2216 if (dev_priv->mm.suspended)
2217 return 0;
2218
2219 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2220 * We need to replace this with a semaphore, or something.
2221 */
2222 dev_priv->mm.suspended = 1;
2223
2224 i915_kernel_lost_context(dev);
2225
2226 /* Flush the GPU along with all non-CPU write domains
2227 */
2228 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2229 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2230 seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2231 I915_GEM_DOMAIN_GTT));
2232
2233 if (seqno == 0) {
2234 mutex_unlock(&dev->struct_mutex);
2235 return -ENOMEM;
2236 }
2237
2238 dev_priv->mm.waiting_gem_seqno = seqno;
2239 last_seqno = 0;
2240 stuck = 0;
2241 for (;;) {
2242 cur_seqno = i915_get_gem_seqno(dev);
2243 if (i915_seqno_passed(cur_seqno, seqno))
2244 break;
2245 if (last_seqno == cur_seqno) {
2246 if (stuck++ > 100) {
2247 DRM_ERROR("hardware wedged\n");
2248 dev_priv->mm.wedged = 1;
2249 DRM_WAKEUP(&dev_priv->irq_queue);
2250 break;
2251 }
2252 }
2253 msleep(10);
2254 last_seqno = cur_seqno;
2255 }
2256 dev_priv->mm.waiting_gem_seqno = 0;
2257
2258 i915_gem_retire_requests(dev);
2259
2260 /* Active and flushing should now be empty as we've
2261 * waited for a sequence higher than any pending execbuffer
2262 */
2263 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2264 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2265
2266	/* The request list should now be empty as we've also waited
2267 * for the last request in the list
2268 */
2269 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2270
2271 /* Move all buffers out of the GTT. */
2272 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2273 if (ret)
2274 return ret;
2275
2276 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2277 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2278 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2279 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2280 return 0;
2281}
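
The wait loop above polls i915_get_gem_seqno() until the sequence number returned for the flush has passed, and declares the hardware wedged if the value stops advancing for roughly a second (100 iterations of msleep(10)). Sequence numbers are 32-bit counters that eventually wrap, so the "passed" comparison has to be wraparound-safe; a minimal sketch of such a check (an illustration, not necessarily the driver's exact i915_seqno_passed()):

#include <stdint.h>

/* Wraparound-safe "has the current seqno reached the target yet?" test:
 * the signed difference is non-negative for any pair of sequence
 * numbers less than 2^31 apart, so the comparison keeps working after
 * the 32-bit counter wraps.
 */
static int seqno_passed(uint32_t cur, uint32_t target)
{
	return (int32_t)(cur - target) >= 0;
}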
2282
2283static int
2284i915_gem_init_hws(struct drm_device *dev)
2285{
2286 drm_i915_private_t *dev_priv = dev->dev_private;
2287 struct drm_gem_object *obj;
2288 struct drm_i915_gem_object *obj_priv;
2289 int ret;
2290
2291 /* If we need a physical address for the status page, it's already
2292 * initialized at driver load time.
2293 */
2294 if (!I915_NEED_GFX_HWS(dev))
2295 return 0;
2296
2297 obj = drm_gem_object_alloc(dev, 4096);
2298 if (obj == NULL) {
2299 DRM_ERROR("Failed to allocate status page\n");
2300 return -ENOMEM;
2301 }
2302 obj_priv = obj->driver_private;
2303
2304 ret = i915_gem_object_pin(obj, 4096);
2305 if (ret != 0) {
2306 drm_gem_object_unreference(obj);
2307 return ret;
2308 }
2309
2310 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2311 dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
2312 dev_priv->hws_map.size = 4096;
2313 dev_priv->hws_map.type = 0;
2314 dev_priv->hws_map.flags = 0;
2315 dev_priv->hws_map.mtrr = 0;
2316
2317 drm_core_ioremap(&dev_priv->hws_map, dev);
2318 if (dev_priv->hws_map.handle == NULL) {
2319 DRM_ERROR("Failed to map status page.\n");
2320 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2321 drm_gem_object_unreference(obj);
2322 return -EINVAL;
2323 }
2324 dev_priv->hws_obj = obj;
2325 dev_priv->hw_status_page = dev_priv->hws_map.handle;
2326 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2327 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2328 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2329
2330 return 0;
2331}
2332
2333static int
2334i915_gem_init_ringbuffer(struct drm_device *dev)
2335{
2336 drm_i915_private_t *dev_priv = dev->dev_private;
2337 struct drm_gem_object *obj;
2338 struct drm_i915_gem_object *obj_priv;
2339 int ret;
2340
2341 ret = i915_gem_init_hws(dev);
2342 if (ret != 0)
2343 return ret;
2344
2345 obj = drm_gem_object_alloc(dev, 128 * 1024);
2346 if (obj == NULL) {
2347 DRM_ERROR("Failed to allocate ringbuffer\n");
2348 return -ENOMEM;
2349 }
2350 obj_priv = obj->driver_private;
2351
2352 ret = i915_gem_object_pin(obj, 4096);
2353 if (ret != 0) {
2354 drm_gem_object_unreference(obj);
2355 return ret;
2356 }
2357
2358 /* Set up the kernel mapping for the ring. */
2359 dev_priv->ring.Size = obj->size;
2360 dev_priv->ring.tail_mask = obj->size - 1;
2361
2362 dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2363 dev_priv->ring.map.size = obj->size;
2364 dev_priv->ring.map.type = 0;
2365 dev_priv->ring.map.flags = 0;
2366 dev_priv->ring.map.mtrr = 0;
2367
2368 drm_core_ioremap(&dev_priv->ring.map, dev);
2369 if (dev_priv->ring.map.handle == NULL) {
2370 DRM_ERROR("Failed to map ringbuffer.\n");
2371 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2372 drm_gem_object_unreference(obj);
2373 return -EINVAL;
2374 }
2375 dev_priv->ring.ring_obj = obj;
2376 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2377
2378 /* Stop the ring if it's running. */
2379 I915_WRITE(PRB0_CTL, 0);
2380 I915_WRITE(PRB0_HEAD, 0);
2381 I915_WRITE(PRB0_TAIL, 0);
2382 I915_WRITE(PRB0_START, 0);
2383
2384 /* Initialize the ring. */
2385 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
2386 I915_WRITE(PRB0_CTL,
2387 ((obj->size - 4096) & RING_NR_PAGES) |
2388 RING_NO_REPORT |
2389 RING_VALID);
2390
2391 /* Update our cache of the ring state */
2392 i915_kernel_lost_context(dev);
2393
2394 return 0;
2395}
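
The ring.Size and ring.tail_mask values recorded here are what later command emission uses to wrap the software tail pointer around the buffer; because the ring size is a power of two, wrapping is a simple mask. A simplified sketch of emitting one dword through the mapping set up above (not the driver's actual OUT_RING/ADVANCE_LP_RING macros; the local "tail" cursor is hypothetical):

#include <stdint.h>

/* Write one dword at the current software tail of the mapped ring and
 * return the new tail, wrapped with tail_mask (the size is a power of
 * two, so masking is equivalent to the modulo).
 */
static uint32_t ring_emit_dword(void *virtual_start, uint32_t tail,
				uint32_t tail_mask, uint32_t dword)
{
	*(volatile uint32_t *)((char *)virtual_start + tail) = dword;
	return (tail + 4) & tail_mask;
}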
2396
2397static void
2398i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2399{
2400 drm_i915_private_t *dev_priv = dev->dev_private;
2401
2402 if (dev_priv->ring.ring_obj == NULL)
2403 return;
2404
2405 drm_core_ioremapfree(&dev_priv->ring.map, dev);
2406
2407 i915_gem_object_unpin(dev_priv->ring.ring_obj);
2408 drm_gem_object_unreference(dev_priv->ring.ring_obj);
2409 dev_priv->ring.ring_obj = NULL;
2410 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2411
2412 if (dev_priv->hws_obj != NULL) {
2413 i915_gem_object_unpin(dev_priv->hws_obj);
2414 drm_gem_object_unreference(dev_priv->hws_obj);
2415 dev_priv->hws_obj = NULL;
2416 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2417
2418 /* Write high address into HWS_PGA when disabling. */
2419 I915_WRITE(HWS_PGA, 0x1ffff000);
2420 }
2421}
2422
2423int
2424i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2425 struct drm_file *file_priv)
2426{
2427 drm_i915_private_t *dev_priv = dev->dev_private;
2428 int ret;
2429
2430 if (dev_priv->mm.wedged) {
2431 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2432 dev_priv->mm.wedged = 0;
2433 }
2434
2435 ret = i915_gem_init_ringbuffer(dev);
2436 if (ret != 0)
2437 return ret;
2438
2439 mutex_lock(&dev->struct_mutex);
2440 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2441 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2442 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2443 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2444 dev_priv->mm.suspended = 0;
2445 mutex_unlock(&dev->struct_mutex);
2446 return 0;
2447}
2448
2449int
2450i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2451 struct drm_file *file_priv)
2452{
2453 int ret;
2454
2455 mutex_lock(&dev->struct_mutex);
2456 ret = i915_gem_idle(dev);
2457 if (ret == 0)
2458 i915_gem_cleanup_ringbuffer(dev);
2459 mutex_unlock(&dev->struct_mutex);
2460
2461 return 0;
2462}
2463
2464void
2465i915_gem_lastclose(struct drm_device *dev)
2466{
2467 int ret;
2468 drm_i915_private_t *dev_priv = dev->dev_private;
2469
2470 mutex_lock(&dev->struct_mutex);
2471
2472 if (dev_priv->ring.ring_obj != NULL) {
2473 ret = i915_gem_idle(dev);
2474 if (ret)
2475 DRM_ERROR("failed to idle hardware: %d\n", ret);
2476
2477 i915_gem_cleanup_ringbuffer(dev);
2478 }
2479
2480 mutex_unlock(&dev->struct_mutex);
2481}
2482
2483void
2484i915_gem_load(struct drm_device *dev)
2485{
2486 drm_i915_private_t *dev_priv = dev->dev_private;
2487
2488 INIT_LIST_HEAD(&dev_priv->mm.active_list);
2489 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2490 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2491 INIT_LIST_HEAD(&dev_priv->mm.request_list);
2492 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2493 i915_gem_retire_work_handler);
2494 dev_priv->mm.next_gem_seqno = 1;
2495
2496 i915_gem_detect_bit_6_swizzle(dev);
2497}
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
new file mode 100644
index 000000000000..131c088f8c8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -0,0 +1,201 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32
33#if WATCH_INACTIVE
34void
35i915_verify_inactive(struct drm_device *dev, char *file, int line)
36{
37 drm_i915_private_t *dev_priv = dev->dev_private;
38 struct drm_gem_object *obj;
39 struct drm_i915_gem_object *obj_priv;
40
41 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
42 obj = obj_priv->obj;
43 if (obj_priv->pin_count || obj_priv->active ||
44 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
45 I915_GEM_DOMAIN_GTT)))
46 DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
47 obj,
48 obj_priv->pin_count, obj_priv->active,
49 obj->write_domain, file, line);
50 }
51}
52#endif /* WATCH_INACTIVE */
53
54
55#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
56static void
57i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
58 uint32_t bias, uint32_t mark)
59{
60 uint32_t *mem = kmap_atomic(page, KM_USER0);
61 int i;
62 for (i = start; i < end; i += 4)
63 DRM_INFO("%08x: %08x%s\n",
64 (int) (bias + i), mem[i / 4],
65 (bias + i == mark) ? " ********" : "");
66 kunmap_atomic(mem, KM_USER0);
67 /* give syslog time to catch up */
68 msleep(1);
69}
70
71void
72i915_gem_dump_object(struct drm_gem_object *obj, int len,
73 const char *where, uint32_t mark)
74{
75 struct drm_i915_gem_object *obj_priv = obj->driver_private;
76 int page;
77
78 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
79 for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
80 int page_len, chunk, chunk_len;
81
82 page_len = len - page * PAGE_SIZE;
83 if (page_len > PAGE_SIZE)
84 page_len = PAGE_SIZE;
85
86 for (chunk = 0; chunk < page_len; chunk += 128) {
87 chunk_len = page_len - chunk;
88 if (chunk_len > 128)
89 chunk_len = 128;
90 i915_gem_dump_page(obj_priv->page_list[page],
91 chunk, chunk + chunk_len,
92 obj_priv->gtt_offset +
93 page * PAGE_SIZE,
94 mark);
95 }
96 }
97}
98#endif
99
100#if WATCH_LRU
101void
102i915_dump_lru(struct drm_device *dev, const char *where)
103{
104 drm_i915_private_t *dev_priv = dev->dev_private;
105 struct drm_i915_gem_object *obj_priv;
106
107 DRM_INFO("active list %s {\n", where);
108 list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
109 list)
110 {
111 DRM_INFO(" %p: %08x\n", obj_priv,
112 obj_priv->last_rendering_seqno);
113 }
114 DRM_INFO("}\n");
115 DRM_INFO("flushing list %s {\n", where);
116 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
117 list)
118 {
119 DRM_INFO(" %p: %08x\n", obj_priv,
120 obj_priv->last_rendering_seqno);
121 }
122 DRM_INFO("}\n");
123 DRM_INFO("inactive %s {\n", where);
124 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
125 DRM_INFO(" %p: %08x\n", obj_priv,
126 obj_priv->last_rendering_seqno);
127 }
128 DRM_INFO("}\n");
129}
130#endif
131
132
133#if WATCH_COHERENCY
134void
135i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
136{
137 struct drm_device *dev = obj->dev;
138 struct drm_i915_gem_object *obj_priv = obj->driver_private;
139 int page;
140 uint32_t *gtt_mapping;
141 uint32_t *backing_map = NULL;
142 int bad_count = 0;
143
144 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
145 __func__, obj, obj_priv->gtt_offset, handle,
146 obj->size / 1024);
147
148 gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
149 obj->size);
150 if (gtt_mapping == NULL) {
151 DRM_ERROR("failed to map GTT space\n");
152 return;
153 }
154
155 for (page = 0; page < obj->size / PAGE_SIZE; page++) {
156 int i;
157
158 backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
159
160 if (backing_map == NULL) {
161 DRM_ERROR("failed to map backing page\n");
162 goto out;
163 }
164
165 for (i = 0; i < PAGE_SIZE / 4; i++) {
166 uint32_t cpuval = backing_map[i];
167 uint32_t gttval = readl(gtt_mapping +
168 page * 1024 + i);
169
170 if (cpuval != gttval) {
171 DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
172 "0x%08x vs 0x%08x\n",
173 (int)(obj_priv->gtt_offset +
174 page * PAGE_SIZE + i * 4),
175 cpuval, gttval);
176 if (bad_count++ >= 8) {
177 DRM_INFO("...\n");
178 goto out;
179 }
180 }
181 }
182 kunmap_atomic(backing_map, KM_USER0);
183 backing_map = NULL;
184 }
185
186 out:
187 if (backing_map != NULL)
188 kunmap_atomic(backing_map, KM_USER0);
189 iounmap(gtt_mapping);
190
191 /* give syslog time to catch up */
192 msleep(1);
193
194 /* Directly flush the object, since we just loaded values with the CPU
195 * from the backing pages and we don't want to disturb the cache
196 * management that we're trying to observe.
197 */
198
199 i915_gem_clflush_object(obj);
200}
201#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
new file mode 100644
index 000000000000..15d4160415b0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -0,0 +1,292 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34static int i915_gem_active_info(char *buf, char **start, off_t offset,
35 int request, int *eof, void *data)
36{
37 struct drm_minor *minor = (struct drm_minor *) data;
38 struct drm_device *dev = minor->dev;
39 drm_i915_private_t *dev_priv = dev->dev_private;
40 struct drm_i915_gem_object *obj_priv;
41 int len = 0;
42
43 if (offset > DRM_PROC_LIMIT) {
44 *eof = 1;
45 return 0;
46 }
47
48 *start = &buf[offset];
49 *eof = 0;
50 DRM_PROC_PRINT("Active:\n");
51 list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
52 list)
53 {
54 struct drm_gem_object *obj = obj_priv->obj;
55 if (obj->name) {
56 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
57 obj, obj->name,
58 obj->read_domains, obj->write_domain,
59 obj_priv->last_rendering_seqno);
60 } else {
61 DRM_PROC_PRINT(" %p: %08x %08x %d\n",
62 obj,
63 obj->read_domains, obj->write_domain,
64 obj_priv->last_rendering_seqno);
65 }
66 }
67 if (len > request + offset)
68 return request;
69 *eof = 1;
70 return len - offset;
71}
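
Each of these handlers follows the legacy read_proc contract: format into buf, point *start at the requested offset, flag *eof when everything fits, and otherwise return at most request bytes. DRM_PROC_PRINT is the DRM core's helper for the formatting half; a hedged sketch of what such a macro amounts to (an illustration of the contract these handlers rely on, not the core's exact definition):

/* Illustrative stand-in for a DRM_PROC_PRINT-style macro: append
 * formatted text at the current length and bail out once the single
 * proc buffer (DRM_PROC_LIMIT) is full.  Assumes buf, len, offset and
 * eof from the enclosing read_proc handler.
 */
#define GEM_PROC_PRINT(fmt, ...)					\
	do {								\
		len += sprintf(&buf[len], fmt, ##__VA_ARGS__);		\
		if (len > DRM_PROC_LIMIT) {				\
			*eof = 1;					\
			return len - offset;				\
		}							\
	} while (0)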
72
73static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
74 int request, int *eof, void *data)
75{
76 struct drm_minor *minor = (struct drm_minor *) data;
77 struct drm_device *dev = minor->dev;
78 drm_i915_private_t *dev_priv = dev->dev_private;
79 struct drm_i915_gem_object *obj_priv;
80 int len = 0;
81
82 if (offset > DRM_PROC_LIMIT) {
83 *eof = 1;
84 return 0;
85 }
86
87 *start = &buf[offset];
88 *eof = 0;
89 DRM_PROC_PRINT("Flushing:\n");
90 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
91 list)
92 {
93 struct drm_gem_object *obj = obj_priv->obj;
94 if (obj->name) {
95 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
96 obj, obj->name,
97 obj->read_domains, obj->write_domain,
98 obj_priv->last_rendering_seqno);
99 } else {
100 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
101 obj->read_domains, obj->write_domain,
102 obj_priv->last_rendering_seqno);
103 }
104 }
105 if (len > request + offset)
106 return request;
107 *eof = 1;
108 return len - offset;
109}
110
111static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
112 int request, int *eof, void *data)
113{
114 struct drm_minor *minor = (struct drm_minor *) data;
115 struct drm_device *dev = minor->dev;
116 drm_i915_private_t *dev_priv = dev->dev_private;
117 struct drm_i915_gem_object *obj_priv;
118 int len = 0;
119
120 if (offset > DRM_PROC_LIMIT) {
121 *eof = 1;
122 return 0;
123 }
124
125 *start = &buf[offset];
126 *eof = 0;
127 DRM_PROC_PRINT("Inactive:\n");
128 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
129 list)
130 {
131 struct drm_gem_object *obj = obj_priv->obj;
132 if (obj->name) {
133 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
134 obj, obj->name,
135 obj->read_domains, obj->write_domain,
136 obj_priv->last_rendering_seqno);
137 } else {
138 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
139 obj->read_domains, obj->write_domain,
140 obj_priv->last_rendering_seqno);
141 }
142 }
143 if (len > request + offset)
144 return request;
145 *eof = 1;
146 return len - offset;
147}
148
149static int i915_gem_request_info(char *buf, char **start, off_t offset,
150 int request, int *eof, void *data)
151{
152 struct drm_minor *minor = (struct drm_minor *) data;
153 struct drm_device *dev = minor->dev;
154 drm_i915_private_t *dev_priv = dev->dev_private;
155 struct drm_i915_gem_request *gem_request;
156 int len = 0;
157
158 if (offset > DRM_PROC_LIMIT) {
159 *eof = 1;
160 return 0;
161 }
162
163 *start = &buf[offset];
164 *eof = 0;
165 DRM_PROC_PRINT("Request:\n");
166 list_for_each_entry(gem_request, &dev_priv->mm.request_list,
167 list)
168 {
169 DRM_PROC_PRINT(" %d @ %d %08x\n",
170 gem_request->seqno,
171 (int) (jiffies - gem_request->emitted_jiffies),
172 gem_request->flush_domains);
173 }
174 if (len > request + offset)
175 return request;
176 *eof = 1;
177 return len - offset;
178}
179
180static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
181 int request, int *eof, void *data)
182{
183 struct drm_minor *minor = (struct drm_minor *) data;
184 struct drm_device *dev = minor->dev;
185 drm_i915_private_t *dev_priv = dev->dev_private;
186 int len = 0;
187
188 if (offset > DRM_PROC_LIMIT) {
189 *eof = 1;
190 return 0;
191 }
192
193 *start = &buf[offset];
194 *eof = 0;
195 DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
196 DRM_PROC_PRINT("Waiter sequence: %d\n",
197 dev_priv->mm.waiting_gem_seqno);
198 DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
199 if (len > request + offset)
200 return request;
201 *eof = 1;
202 return len - offset;
203}
204
205
206static int i915_interrupt_info(char *buf, char **start, off_t offset,
207 int request, int *eof, void *data)
208{
209 struct drm_minor *minor = (struct drm_minor *) data;
210 struct drm_device *dev = minor->dev;
211 drm_i915_private_t *dev_priv = dev->dev_private;
212 int len = 0;
213
214 if (offset > DRM_PROC_LIMIT) {
215 *eof = 1;
216 return 0;
217 }
218
219 *start = &buf[offset];
220 *eof = 0;
221 DRM_PROC_PRINT("Interrupt enable: %08x\n",
222 I915_READ(IER));
223 DRM_PROC_PRINT("Interrupt identity: %08x\n",
224 I915_READ(IIR));
225 DRM_PROC_PRINT("Interrupt mask: %08x\n",
226 I915_READ(IMR));
227 DRM_PROC_PRINT("Pipe A stat: %08x\n",
228 I915_READ(PIPEASTAT));
229 DRM_PROC_PRINT("Pipe B stat: %08x\n",
230 I915_READ(PIPEBSTAT));
231 DRM_PROC_PRINT("Interrupts received: %d\n",
232 atomic_read(&dev_priv->irq_received));
233 DRM_PROC_PRINT("Current sequence: %d\n",
234 i915_get_gem_seqno(dev));
235 DRM_PROC_PRINT("Waiter sequence: %d\n",
236 dev_priv->mm.waiting_gem_seqno);
237 DRM_PROC_PRINT("IRQ sequence: %d\n",
238 dev_priv->mm.irq_gem_seqno);
239 if (len > request + offset)
240 return request;
241 *eof = 1;
242 return len - offset;
243}
244
245static struct drm_proc_list {
246 /** file name */
247 const char *name;
248 /** proc callback*/
249 int (*f) (char *, char **, off_t, int, int *, void *);
250} i915_gem_proc_list[] = {
251 {"i915_gem_active", i915_gem_active_info},
252 {"i915_gem_flushing", i915_gem_flushing_info},
253 {"i915_gem_inactive", i915_gem_inactive_info},
254 {"i915_gem_request", i915_gem_request_info},
255 {"i915_gem_seqno", i915_gem_seqno_info},
256 {"i915_gem_interrupt", i915_interrupt_info},
257};
258
259#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
260
261int i915_gem_proc_init(struct drm_minor *minor)
262{
263 struct proc_dir_entry *ent;
264 int i, j;
265
266 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
267 ent = create_proc_entry(i915_gem_proc_list[i].name,
268 S_IFREG | S_IRUGO, minor->dev_root);
269 if (!ent) {
270 DRM_ERROR("Cannot create /proc/dri/.../%s\n",
271 i915_gem_proc_list[i].name);
272 for (j = 0; j < i; j++)
273				remove_proc_entry(i915_gem_proc_list[j].name,
274 minor->dev_root);
275 return -1;
276 }
277 ent->read_proc = i915_gem_proc_list[i].f;
278 ent->data = minor;
279 }
280 return 0;
281}
282
283void i915_gem_proc_cleanup(struct drm_minor *minor)
284{
285 int i;
286
287 if (!minor->dev_root)
288 return;
289
290 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
291 remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
292}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
new file mode 100644
index 000000000000..0c1b3a0834e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -0,0 +1,256 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32
33/** @file i915_gem_tiling.c
34 *
35 * Support for managing tiling state of buffer objects.
36 *
37 * The idea behind tiling is to increase cache hit rates by rearranging
38 * pixel data so that a group of pixel accesses are in the same cacheline.
39 * Performance improvements from doing this on the back/depth buffer are on
40 * the order of 30%.
41 *
42 * Intel architectures make this somewhat more complicated, though, by
43 * adjustments made to addressing of data when the memory is in interleaved
44 * mode (matched pairs of DIMMs) to improve memory bandwidth.
45 * For interleaved memory, the CPU sends every sequential 64 bytes
46 * to an alternate memory channel so it can get the bandwidth from both.
47 *
48 * The GPU also rearranges its accesses for increased bandwidth to interleaved
49 * memory, and it matches what the CPU does for non-tiled. However, when tiled
50 * it does it a little differently, since one walks addresses not just in the
51 * X direction but also Y. So, along with alternating channels when bit
52 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
53 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
54 * are common to both the 915 and 965-class hardware.
55 *
56 * The CPU also sometimes XORs in higher bits as well, to improve
57 * bandwidth doing strided access like we do so frequently in graphics. This
58 * is called "Channel XOR Randomization" in the MCH documentation. The result
59 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
60 * decode.
61 *
62 * All of this bit 6 XORing has an effect on our memory management,
63 * as we need to make sure that the 3d driver can correctly address object
64 * contents.
65 *
66 * If we don't have interleaved memory, all tiling is safe and no swizzling is
67 * required.
68 *
69 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
70 * 17 is not just a page offset, so as we page an object out and back in,
71 * individual pages in it will have different bit 17 addresses, resulting in
72 * each 64 bytes being swapped with its neighbor!
73 *
74 * Otherwise, if interleaved, we have to tell the 3d driver what address
75 * swizzling it needs to do, since it's writing with the CPU to the pages
76 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
77 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
78 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
79 * to match what the GPU expects.
80 */
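
To make the swizzle modes concrete: with I915_BIT_6_SWIZZLE_9_10, for example, CPU access to a tiled object has to XOR bits 9 and 10 of the byte offset into bit 6 so that it reads and writes the same bytes the GPU will. A minimal sketch of that address fixup (an illustration, not code from this patch):

#include <stdint.h>

/* Extract the named address bit and shift it down to bit 6. */
static uint32_t swizzle_bit(uint32_t offset, int bit)
{
	return ((offset >> bit) & 1) << 6;
}

/* CPU-visible offset for I915_BIT_6_SWIZZLE_9_10: the linear offset
 * with (bit 9 ^ bit 10) folded into bit 6.
 */
static uint32_t swizzle_offset_9_10(uint32_t offset)
{
	return offset ^ swizzle_bit(offset, 9) ^ swizzle_bit(offset, 10);
}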
81
82/**
83 * Detects bit 6 swizzling of address lookup between IGD access and CPU
84 * access through main memory.
85 */
86void
87i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
88{
89 drm_i915_private_t *dev_priv = dev->dev_private;
90 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
91 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
92
93 if (!IS_I9XX(dev)) {
94 /* As far as we know, the 865 doesn't have these bit 6
95 * swizzling issues.
96 */
97 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
98 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
99 } else if (!IS_I965G(dev) || IS_I965GM(dev)) {
100 uint32_t dcc;
101
102 /* On 915-945 and GM965, channel interleave by the CPU is
103 * determined by DCC. The CPU will alternate based on bit 6
104 * in interleaved mode, and the GPU will then also alternate
105 * on bit 6, 9, and 10 for X, but the CPU may also optionally
106 * alternate based on bit 17 (XOR not disabled and XOR
107 * bit == 17).
108 */
109 dcc = I915_READ(DCC);
110 switch (dcc & DCC_ADDRESSING_MODE_MASK) {
111 case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
112 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
113 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
114 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
115 break;
116 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
117 if (IS_I915G(dev) || IS_I915GM(dev) ||
118 dcc & DCC_CHANNEL_XOR_DISABLE) {
119 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
120 swizzle_y = I915_BIT_6_SWIZZLE_9;
121 } else if (IS_I965GM(dev)) {
122 /* GM965 only does bit 11-based channel
123 * randomization
124 */
125 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
126 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
127 } else {
128 /* Bit 17 or perhaps other swizzling */
129 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
130 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
131 }
132 break;
133 }
134 if (dcc == 0xffffffff) {
135 DRM_ERROR("Couldn't read from MCHBAR. "
136 "Disabling tiling.\n");
137 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
138 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
139 }
140 } else {
141 /* The 965, G33, and newer, have a very flexible memory
142 * configuration. It will enable dual-channel mode
143 * (interleaving) on as much memory as it can, and the GPU
144 * will additionally sometimes enable different bit 6
145 * swizzling for tiled objects from the CPU.
146 *
147 * Here's what I found on the G965:
148		 *    slot fill         memory size  swizzling
149		 * 0A   0B   1A   1B    1-ch   2-ch
150		 * 512  0    0    0     512    0     O
151		 * 512  0    512  0     16     1008  X
152		 * 512  0    0    512   16     1008  X
153		 * 0    512  0    512   16     1008  X
154		 * 1024 1024 1024 0     2048   1024  O
155 *
156		 * We could probably detect this based either on the DRB
157		 * matching, which was the case for the swizzling required in
158		 * the table above, or on the 1-ch value being less than
159 * the minimum size of a rank.
160 */
161 if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
162 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
163 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
164 } else {
165 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
166 swizzle_y = I915_BIT_6_SWIZZLE_9;
167 }
168 }
169
170 dev_priv->mm.bit_6_swizzle_x = swizzle_x;
171 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
172}
173
174/**
175 * Sets the tiling mode of an object, returning the required swizzling of
176 * bit 6 of addresses in the object.
177 */
178int
179i915_gem_set_tiling(struct drm_device *dev, void *data,
180 struct drm_file *file_priv)
181{
182 struct drm_i915_gem_set_tiling *args = data;
183 drm_i915_private_t *dev_priv = dev->dev_private;
184 struct drm_gem_object *obj;
185 struct drm_i915_gem_object *obj_priv;
186
187 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
188 if (obj == NULL)
189 return -EINVAL;
190 obj_priv = obj->driver_private;
191
192 mutex_lock(&dev->struct_mutex);
193
194 if (args->tiling_mode == I915_TILING_NONE) {
195 obj_priv->tiling_mode = I915_TILING_NONE;
196 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
197 } else {
198 if (args->tiling_mode == I915_TILING_X)
199 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
200 else
201 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
202 /* If we can't handle the swizzling, make it untiled. */
203 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
204 args->tiling_mode = I915_TILING_NONE;
205 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
206 }
207 }
208 obj_priv->tiling_mode = args->tiling_mode;
209
210 mutex_unlock(&dev->struct_mutex);
211
212 drm_gem_object_unreference(obj);
213
214 return 0;
215}
216
217/**
218 * Returns the current tiling mode and required bit 6 swizzling for the object.
219 */
220int
221i915_gem_get_tiling(struct drm_device *dev, void *data,
222 struct drm_file *file_priv)
223{
224 struct drm_i915_gem_get_tiling *args = data;
225 drm_i915_private_t *dev_priv = dev->dev_private;
226 struct drm_gem_object *obj;
227 struct drm_i915_gem_object *obj_priv;
228
229 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
230 if (obj == NULL)
231 return -EINVAL;
232 obj_priv = obj->driver_private;
233
234 mutex_lock(&dev->struct_mutex);
235
236 args->tiling_mode = obj_priv->tiling_mode;
237 switch (obj_priv->tiling_mode) {
238 case I915_TILING_X:
239 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
240 break;
241 case I915_TILING_Y:
242 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
243 break;
244 case I915_TILING_NONE:
245 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
246 break;
247 default:
248 DRM_ERROR("unknown tiling mode\n");
249 }
250
251 mutex_unlock(&dev->struct_mutex);
252
253 drm_gem_object_unreference(obj);
254
255 return 0;
256}
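
From userspace, both calls are single ioctls on a GEM handle; the returned swizzle_mode tells the 3D driver whether it must apply the bit 6 fixup described above when it touches the pages with the CPU. A minimal sketch of querying the state (error handling trimmed; the libdrm-style include path is an assumption here):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Ask the kernel how a GEM object is tiled and which bit 6 swizzling
 * CPU access to it requires.  "fd" is an open DRM device node and
 * "handle" a GEM handle owned by that fd.
 */
static int query_tiling(int fd, unsigned int handle)
{
	struct drm_i915_gem_get_tiling get;

	memset(&get, 0, sizeof(get));
	get.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get) != 0)
		return -1;

	printf("tiling %u, swizzle %u\n", get.tiling_mode, get.swizzle_mode);
	return 0;
}
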
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f8759597233b..f295bdf16e2d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -407,15 +407,20 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
407 I915_WRITE(PIPEBSTAT, pipeb_stats); 407 I915_WRITE(PIPEBSTAT, pipeb_stats);
408 } 408 }
409 409
410 if (iir & I915_ASLE_INTERRUPT) 410 I915_WRITE(IIR, iir);
411 opregion_asle_intr(dev); 411 if (dev->pdev->msi_enabled)
412 I915_WRITE(IMR, dev_priv->irq_mask_reg);
413 (void) I915_READ(IIR); /* Flush posted writes */
412 414
413 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 415 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
414 416
415 if (dev->pdev->msi_enabled) 417 if (iir & I915_USER_INTERRUPT) {
416 I915_WRITE(IMR, dev_priv->irq_mask_reg); 418 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
417 I915_WRITE(IIR, iir); 419 DRM_WAKEUP(&dev_priv->irq_queue);
418 (void) I915_READ(IIR); 420 }
421
422 if (iir & I915_ASLE_INTERRUPT)
423 opregion_asle_intr(dev);
419 424
420 if (vblank && dev_priv->swaps_pending > 0) 425 if (vblank && dev_priv->swaps_pending > 0)
421 drm_locked_tasklet(dev, i915_vblank_tasklet); 426 drm_locked_tasklet(dev, i915_vblank_tasklet);
@@ -449,7 +454,7 @@ static int i915_emit_irq(struct drm_device * dev)
449 return dev_priv->counter; 454 return dev_priv->counter;
450} 455}
451 456
452static void i915_user_irq_get(struct drm_device *dev) 457void i915_user_irq_get(struct drm_device *dev)
453{ 458{
454 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 459 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
455 460
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 43ad2cb8b9f5..5c2d9f206d05 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,19 +25,6 @@
25#ifndef _I915_REG_H_ 25#ifndef _I915_REG_H_
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28/* MCH MMIO space */
29/** 915-945 and GM965 MCH register controlling DRAM channel access */
30#define DCC 0x200
31#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
32#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
33#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
34#define DCC_ADDRESSING_MODE_MASK (3 << 0)
35#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
36
37/** 965 MCH register controlling DRAM channel configuration */
38#define CHDECMISC 0x111
39#define CHDECMISC_FLEXMEMORY (1 << 1)
40
41/* 28/*
42 * The Bridge device's PCI config space has information about the 29 * The Bridge device's PCI config space has information about the
43 * fb aperture size and the amount of pre-reserved memory. 30 * fb aperture size and the amount of pre-reserved memory.
@@ -516,6 +503,30 @@
516#define PALETTE_A 0x0a000 503#define PALETTE_A 0x0a000
517#define PALETTE_B 0x0a800 504#define PALETTE_B 0x0a800
518 505
506/* MCH MMIO space */
507
508/*
509 * MCHBAR mirror.
510 *
511 * This mirrors the MCHBAR MMIO space whose location is determined by
512 * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
513 * every way. It is not accessible from the CP register read instructions.
514 *
515 */
516#define MCHBAR_MIRROR_BASE 0x10000
517
518/** 915-945 and GM965 MCH register controlling DRAM channel access */
519#define DCC 0x10200
520#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
521#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
522#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
523#define DCC_ADDRESSING_MODE_MASK (3 << 0)
524#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
525
526/** 965 MCH register controlling DRAM channel configuration */
527#define C0DRB3 0x10206
528#define C1DRB3 0x10606
529
519/* 530/*
520 * Overlay regs 531 * Overlay regs
521 */ 532 */
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 15e55039b7f1..f46ba4b57da4 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -570,6 +570,34 @@ struct drm_set_version {
570 int drm_dd_minor; 570 int drm_dd_minor;
571}; 571};
572 572
573/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
574struct drm_gem_close {
575 /** Handle of the object to be closed. */
576 uint32_t handle;
577 uint32_t pad;
578};
579
580/** DRM_IOCTL_GEM_FLINK ioctl argument type */
581struct drm_gem_flink {
582 /** Handle for the object being named */
583 uint32_t handle;
584
585 /** Returned global name */
586 uint32_t name;
587};
588
589/** DRM_IOCTL_GEM_OPEN ioctl argument type */
590struct drm_gem_open {
591 /** Name of object being opened */
592 uint32_t name;
593
594 /** Returned handle for the object */
595 uint32_t handle;
596
597 /** Returned size of the object */
598 uint64_t size;
599};
600
573#define DRM_IOCTL_BASE 'd' 601#define DRM_IOCTL_BASE 'd'
574#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) 602#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
575#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) 603#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
@@ -585,6 +613,9 @@ struct drm_set_version {
585#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) 613#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
586#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) 614#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
587#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) 615#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
616#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
617#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
618#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
588 619
589#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) 620#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
590#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) 621#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
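
The flink/open pair added here is how GEM buffers are shared between processes: the exporting process turns a local handle into a global name, and the importing process turns that name back into a handle of its own (each handle holds its own reference on the object). A hedged sketch of both sides, assuming each process already has an open DRM fd and skipping error handling:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Exporter: publish a global name for a local GEM handle. */
static unsigned int gem_flink(int fd, unsigned int handle)
{
	struct drm_gem_flink flink;

	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;
	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	return flink.name;	/* hand this name to the other process */
}

/* Importer: turn a global name back into a local handle. */
static unsigned int gem_open_name(int fd, unsigned int name)
{
	struct drm_gem_open open_arg;

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = name;
	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);
	return open_arg.handle;	/* open_arg.size also reports the object size */
}
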
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e79ce0781f0b..1469a1bd8821 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -104,6 +104,7 @@ struct drm_device;
104#define DRIVER_DMA_QUEUE 0x200 104#define DRIVER_DMA_QUEUE 0x200
105#define DRIVER_FB_DMA 0x400 105#define DRIVER_FB_DMA 0x400
106#define DRIVER_IRQ_VBL2 0x800 106#define DRIVER_IRQ_VBL2 0x800
107#define DRIVER_GEM 0x1000
107 108
108/***********************************************************************/ 109/***********************************************************************/
109/** \name Begin the DRM... */ 110/** \name Begin the DRM... */
@@ -387,6 +388,10 @@ struct drm_file {
387 struct drm_minor *minor; 388 struct drm_minor *minor;
388 int remove_auth_on_close; 389 int remove_auth_on_close;
389 unsigned long lock_count; 390 unsigned long lock_count;
391 /** Mapping of mm object handles to object pointers. */
392 struct idr object_idr;
393 /** Lock for synchronization of access to object_idr. */
394 spinlock_t table_lock;
390 struct file *filp; 395 struct file *filp;
391 void *driver_priv; 396 void *driver_priv;
392}; 397};
@@ -558,6 +563,56 @@ struct drm_ati_pcigart_info {
558}; 563};
559 564
560/** 565/**
566 * This structure defines the drm_mm memory object, which will be used by the
567 * DRM for its buffer objects.
568 */
569struct drm_gem_object {
570 /** Reference count of this object */
571 struct kref refcount;
572
573 /** Handle count of this object. Each handle also holds a reference */
574 struct kref handlecount;
575
576 /** Related drm device */
577 struct drm_device *dev;
578
579 /** File representing the shmem storage */
580 struct file *filp;
581
582 /**
583 * Size of the object, in bytes. Immutable over the object's
584 * lifetime.
585 */
586 size_t size;
587
588 /**
589 * Global name for this object, starts at 1. 0 means unnamed.
590 * Access is covered by the object_name_lock in the related drm_device
591 */
592 int name;
593
594 /**
595 * Memory domains. These monitor which caches contain read/write data
596 * related to the object. When transitioning from one set of domains
597 * to another, the driver is called to ensure that caches are suitably
598 * flushed and invalidated
599 */
600 uint32_t read_domains;
601 uint32_t write_domain;
602
603 /**
604 * While validating an exec operation, the
605 * new read/write domain values are computed here.
606 * They will be transferred to the above values
607 * at the point that any cache flushing occurs
608 */
609 uint32_t pending_read_domains;
610 uint32_t pending_write_domain;
611
612 void *driver_private;
613};
614
615/**
561 * DRM driver structure. This structure represent the common code for 616 * DRM driver structure. This structure represent the common code for
562 * a family of cards. There will one drm_device for each card present 617 * a family of cards. There will one drm_device for each card present
563 * in this family 618 * in this family
@@ -657,6 +712,18 @@ struct drm_driver {
657 void (*set_version) (struct drm_device *dev, 712 void (*set_version) (struct drm_device *dev,
658 struct drm_set_version *sv); 713 struct drm_set_version *sv);
659 714
715 int (*proc_init)(struct drm_minor *minor);
716 void (*proc_cleanup)(struct drm_minor *minor);
717
718 /**
719 * Driver-specific constructor for drm_gem_objects, to set up
720 * obj->driver_private.
721 *
722 * Returns 0 on success.
723 */
724 int (*gem_init_object) (struct drm_gem_object *obj);
725 void (*gem_free_object) (struct drm_gem_object *obj);
726
660 int major; 727 int major;
661 int minor; 728 int minor;
662 int patchlevel; 729 int patchlevel;
@@ -830,6 +897,22 @@ struct drm_device {
830 spinlock_t drw_lock; 897 spinlock_t drw_lock;
831 struct idr drw_idr; 898 struct idr drw_idr;
832 /*@} */ 899 /*@} */
900
901 /** \name GEM information */
902 /*@{ */
903 spinlock_t object_name_lock;
904 struct idr object_name_idr;
905 atomic_t object_count;
906 atomic_t object_memory;
907 atomic_t pin_count;
908 atomic_t pin_memory;
909 atomic_t gtt_count;
910 atomic_t gtt_memory;
911 uint32_t gtt_total;
912 uint32_t invalidate_domains; /* domains pending invalidation */
913 uint32_t flush_domains; /* domains pending flush */
914 /*@} */
915
833}; 916};
834 917
835static __inline__ int drm_core_check_feature(struct drm_device *dev, 918static __inline__ int drm_core_check_feature(struct drm_device *dev,
@@ -926,6 +1009,10 @@ extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
926extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); 1009extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
927extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); 1010extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
928extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); 1011extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
1012extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
1013 struct page **pages,
1014 unsigned long num_pages,
1015 uint32_t gtt_offset);
929extern int drm_unbind_agp(DRM_AGP_MEM * handle); 1016extern int drm_unbind_agp(DRM_AGP_MEM * handle);
930 1017
931 /* Misc. IOCTL support (drm_ioctl.h) */ 1018 /* Misc. IOCTL support (drm_ioctl.h) */
@@ -988,6 +1075,9 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
988extern int drm_authmagic(struct drm_device *dev, void *data, 1075extern int drm_authmagic(struct drm_device *dev, void *data,
989 struct drm_file *file_priv); 1076 struct drm_file *file_priv);
990 1077
1078/* Cache management (drm_cache.c) */
1079void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
1080
991 /* Locking IOCTL support (drm_lock.h) */ 1081 /* Locking IOCTL support (drm_lock.h) */
992extern int drm_lock(struct drm_device *dev, void *data, 1082extern int drm_lock(struct drm_device *dev, void *data,
993 struct drm_file *file_priv); 1083 struct drm_file *file_priv);
@@ -1094,6 +1184,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
1094extern int drm_agp_free_memory(DRM_AGP_MEM * handle); 1184extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
1095extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); 1185extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
1096extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); 1186extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
1187extern void drm_agp_chipset_flush(struct drm_device *dev);
1097 1188
1098 /* Stub support (drm_stub.h) */ 1189 /* Stub support (drm_stub.h) */
1099extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, 1190extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
@@ -1156,6 +1247,66 @@ extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
1156extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); 1247extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
1157extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); 1248extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
1158 1249
1250/* Graphics Execution Manager library functions (drm_gem.c) */
1251int drm_gem_init(struct drm_device *dev);
1252void drm_gem_object_free(struct kref *kref);
1253struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1254 size_t size);
1255void drm_gem_object_handle_free(struct kref *kref);
1256
1257static inline void
1258drm_gem_object_reference(struct drm_gem_object *obj)
1259{
1260 kref_get(&obj->refcount);
1261}
1262
1263static inline void
1264drm_gem_object_unreference(struct drm_gem_object *obj)
1265{
1266 if (obj == NULL)
1267 return;
1268
1269 kref_put(&obj->refcount, drm_gem_object_free);
1270}
1271
1272int drm_gem_handle_create(struct drm_file *file_priv,
1273 struct drm_gem_object *obj,
1274 int *handlep);
1275
1276static inline void
1277drm_gem_object_handle_reference(struct drm_gem_object *obj)
1278{
1279 drm_gem_object_reference(obj);
1280 kref_get(&obj->handlecount);
1281}
1282
1283static inline void
1284drm_gem_object_handle_unreference(struct drm_gem_object *obj)
1285{
1286 if (obj == NULL)
1287 return;
1288
1289 /*
1290 * Must bump handle count first as this may be the last
1291 * ref, in which case the object would disappear before we
1292 * checked for a name
1293 */
1294 kref_put(&obj->handlecount, drm_gem_object_handle_free);
1295 drm_gem_object_unreference(obj);
1296}
1297
1298struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
1299 struct drm_file *filp,
1300 int handle);
1301int drm_gem_close_ioctl(struct drm_device *dev, void *data,
1302 struct drm_file *file_priv);
1303int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1304 struct drm_file *file_priv);
1305int drm_gem_open_ioctl(struct drm_device *dev, void *data,
1306 struct drm_file *file_priv);
1307void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
1308void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
1309
1159extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); 1310extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
1160extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev); 1311extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
1161extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); 1312extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 05c66cf03a9e..59d08fca25a4 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -143,6 +143,22 @@ typedef struct _drm_i915_sarea {
143#define DRM_I915_GET_VBLANK_PIPE 0x0e 143#define DRM_I915_GET_VBLANK_PIPE 0x0e
144#define DRM_I915_VBLANK_SWAP 0x0f 144#define DRM_I915_VBLANK_SWAP 0x0f
145#define DRM_I915_HWS_ADDR 0x11 145#define DRM_I915_HWS_ADDR 0x11
146#define DRM_I915_GEM_INIT 0x13
147#define DRM_I915_GEM_EXECBUFFER 0x14
148#define DRM_I915_GEM_PIN 0x15
149#define DRM_I915_GEM_UNPIN 0x16
150#define DRM_I915_GEM_BUSY 0x17
151#define DRM_I915_GEM_THROTTLE 0x18
152#define DRM_I915_GEM_ENTERVT 0x19
153#define DRM_I915_GEM_LEAVEVT 0x1a
154#define DRM_I915_GEM_CREATE 0x1b
155#define DRM_I915_GEM_PREAD 0x1c
156#define DRM_I915_GEM_PWRITE 0x1d
157#define DRM_I915_GEM_MMAP 0x1e
158#define DRM_I915_GEM_SET_DOMAIN 0x1f
159#define DRM_I915_GEM_SW_FINISH 0x20
160#define DRM_I915_GEM_SET_TILING 0x21
161#define DRM_I915_GEM_GET_TILING 0x22
146 162
147#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 163#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
148#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 164#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -160,6 +176,20 @@ typedef struct _drm_i915_sarea {
160#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 176#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
161#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 177#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
162#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 178#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
179#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
180#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
181#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
182#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
183#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
184#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
185#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
186#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
187#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
188#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
189#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
190#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
191#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
192#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
163 193
164/* Allow drivers to submit batchbuffers directly to hardware, relying 194/* Allow drivers to submit batchbuffers directly to hardware, relying
165 * on the security mechanisms provided by hardware. 195 * on the security mechanisms provided by hardware.
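From userspace, each of the new requests above is an ordinary ioctl on the DRM device fd. A sketch of the calling convention using the argument-less throttle request (real callers also restart on EINTR/EAGAIN, as below):

#include <errno.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Throttle this client against outstanding GPU work; fd is an open DRM fd. */
static int example_gem_throttle(int fd)
{
	int ret;

	do {
		ret = ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret == -1 ? -errno : 0;
}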
@@ -200,6 +230,7 @@ typedef struct drm_i915_irq_wait {
200#define I915_PARAM_IRQ_ACTIVE 1 230#define I915_PARAM_IRQ_ACTIVE 1
201#define I915_PARAM_ALLOW_BATCHBUFFER 2 231#define I915_PARAM_ALLOW_BATCHBUFFER 2
202#define I915_PARAM_LAST_DISPATCH 3 232#define I915_PARAM_LAST_DISPATCH 3
233#define I915_PARAM_HAS_GEM 5
203 234
204typedef struct drm_i915_getparam { 235typedef struct drm_i915_getparam {
205 int param; 236 int param;
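I915_PARAM_HAS_GEM lets userspace probe for the new interface before using it. A sketch with the existing getparam ioctl (the value field of drm_i915_getparam_t is a pointer to an int):

#include <sys/ioctl.h>
#include "i915_drm.h"

/* Returns 1 if the running kernel advertises GEM support, 0 otherwise. */
static int example_has_gem(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_GEM;
	gp.value = &value;

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;	/* older kernel: the parameter is unknown */

	return value;
}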
@@ -267,4 +298,305 @@ typedef struct drm_i915_hws_addr {
267 uint64_t addr; 298 uint64_t addr;
268} drm_i915_hws_addr_t; 299} drm_i915_hws_addr_t;
269 300
301struct drm_i915_gem_init {
302 /**
303 * Beginning offset in the GTT to be managed by the DRM memory
304 * manager.
305 */
306 uint64_t gtt_start;
307 /**
308 * Ending offset in the GTT to be managed by the DRM memory
309 * manager.
310 */
311 uint64_t gtt_end;
312};
313
314struct drm_i915_gem_create {
315 /**
316 * Requested size for the object.
317 *
318 * The (page-aligned) allocated size for the object will be returned.
319 */
320 uint64_t size;
321 /**
322 * Returned handle for the object.
323 *
324 * Object handles are nonzero.
325 */
326 uint32_t handle;
327 uint32_t pad;
328};
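Allocating an object then amounts to passing the requested size and reading back the handle. A sketch, with error handling trimmed to the minimum:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Create a GEM object of at least `size` bytes; returns its handle, 0 on error. */
static uint32_t example_gem_create(int fd, uint64_t size)
{
	struct drm_i915_gem_create create;

	memset(&create, 0, sizeof(create));
	create.size = size;		/* rounded up to a page by the kernel */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
		return 0;		/* handles are nonzero, so 0 means failure */

	return create.handle;
}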
329
330struct drm_i915_gem_pread {
331 /** Handle for the object being read. */
332 uint32_t handle;
333 uint32_t pad;
334 /** Offset into the object to read from */
335 uint64_t offset;
336 /** Length of data to read */
337 uint64_t size;
338 /**
339 * Pointer to write the data into.
340 *
341 * This is a fixed-size type for 32/64 compatibility.
342 */
343 uint64_t data_ptr;
344};
345
346struct drm_i915_gem_pwrite {
347 /** Handle for the object being written to. */
348 uint32_t handle;
349 uint32_t pad;
350 /** Offset into the object to write to */
351 uint64_t offset;
352 /** Length of data to write */
353 uint64_t size;
354 /**
355 * Pointer to read the data from.
356 *
357 * This is a fixed-size type for 32/64 compatibility.
358 */
359 uint64_t data_ptr;
360};
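pread and pwrite copy between a user buffer and a byte range of an object without mapping it, leaving any cache or GTT coherency work to the kernel. A pwrite sketch:

#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Copy `len` bytes from `data` into the object at byte `offset`. */
static int example_gem_pwrite(int fd, uint32_t handle, uint64_t offset,
			      const void *data, uint64_t len)
{
	struct drm_i915_gem_pwrite pwrite;

	pwrite.handle = handle;
	pwrite.pad = 0;
	pwrite.offset = offset;
	pwrite.size = len;
	pwrite.data_ptr = (uint64_t)(uintptr_t)data;	/* fixed-size pointer field */

	return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite) ? -1 : 0;
}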
361
362struct drm_i915_gem_mmap {
363 /** Handle for the object being mapped. */
364 uint32_t handle;
365 uint32_t pad;
366 /** Offset in the object to map. */
367 uint64_t offset;
368 /**
369 * Length of data to map.
370 *
371 * The value will be page-aligned.
372 */
373 uint64_t size;
374 /**
375 * Returned pointer the data was mapped at.
376 *
377 * This is a fixed-size type for 32/64 compatibility.
378 */
379 uint64_t addr_ptr;
380};
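The mmap ioctl maps a range of the object's backing pages into the process and returns the CPU address through addr_ptr. A sketch that maps a whole object:

#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Map `size` bytes of the object; returns a CPU pointer or NULL on failure. */
static void *example_gem_mmap(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap mmap_arg;

	mmap_arg.handle = handle;
	mmap_arg.pad = 0;
	mmap_arg.offset = 0;
	mmap_arg.size = size;		/* page-aligned by the kernel */
	mmap_arg.addr_ptr = 0;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) != 0)
		return NULL;

	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}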
381
382struct drm_i915_gem_set_domain {
383 /** Handle for the object */
384 uint32_t handle;
385
386 /** New read domains */
387 uint32_t read_domains;
388
389 /** New write domain */
390 uint32_t write_domain;
391};
392
393struct drm_i915_gem_sw_finish {
394 /** Handle for the object */
395 uint32_t handle;
396};
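CPU access to such a mapping is bracketed by these two ioctls: set_domain pulls the object into the CPU domain before the access, and sw_finish tells the kernel the CPU writes are done so it can flush where needed (for example a pinned scanout buffer). A sketch; the domain flags used here are defined further down in this header:

#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int example_cpu_access_begin(int fd, uint32_t handle, int writing)
{
	struct drm_i915_gem_set_domain sd;

	sd.handle = handle;
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	sd.write_domain = writing ? I915_GEM_DOMAIN_CPU : 0;

	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}

static int example_cpu_access_end(int fd, uint32_t handle)
{
	struct drm_i915_gem_sw_finish fin;

	fin.handle = handle;
	return ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &fin);
}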
397
398struct drm_i915_gem_relocation_entry {
399 /**
400 * Handle of the buffer being pointed to by this relocation entry.
401 *
402	 * It's appealing to make this an index into the validation (exec
403	 * object) list rather than a handle, but a handle lets the driver
404	 * build one relocation list for a state buffer and reuse it across
405	 * execbuffers instead of rewriting it for every submission.
406 */
407 uint32_t target_handle;
408
409 /**
410 * Value to be added to the offset of the target buffer to make up
411 * the relocation entry.
412 */
413 uint32_t delta;
414
415 /** Offset in the buffer the relocation entry will be written into */
416 uint64_t offset;
417
418 /**
419 * Offset value of the target buffer that the relocation entry was last
420 * written as.
421 *
422 * If the buffer has the same offset as last time, we can skip syncing
423 * and writing the relocation. This value is written back out by
424 * the execbuffer ioctl when the relocation is written.
425 */
426 uint64_t presumed_offset;
427
428 /**
429 * Target memory domains read by this operation.
430 */
431 uint32_t read_domains;
432
433 /**
434 * Target memory domains written by this operation.
435 *
436 * Note that only one domain may be written by the whole
437 * execbuffer operation, so that where there are conflicts,
438 * the application will get -EINVAL back.
439 */
440 uint32_t write_domain;
441};
442
443/** @{
444 * Intel memory domains
445 *
446 * Most of these just align with the various caches in
447 * the system and are used to flush and invalidate as
448 * objects end up cached in different domains.
449 */
450/** CPU cache */
451#define I915_GEM_DOMAIN_CPU 0x00000001
452/** Render cache, used by 2D and 3D drawing */
453#define I915_GEM_DOMAIN_RENDER 0x00000002
454/** Sampler cache, used by texture engine */
455#define I915_GEM_DOMAIN_SAMPLER 0x00000004
456/** Command queue, used to load batch buffers */
457#define I915_GEM_DOMAIN_COMMAND 0x00000008
458/** Instruction cache, used by shader programs */
459#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
460/** Vertex address cache */
461#define I915_GEM_DOMAIN_VERTEX 0x00000020
462/** GTT domain - aperture and scanout */
463#define I915_GEM_DOMAIN_GTT 0x00000040
464/** @} */
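Putting the relocation entry and these domain flags together: each entry says "at byte `offset` inside this buffer, write the final GTT address of `target_handle` plus `delta`", with the domains describing how the target will be used. A sketch that fills one entry for a read-only vertex buffer reference in a batch:

#include <stdint.h>
#include <string.h>
#include "i915_drm.h"

static void example_fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
			       uint32_t target_handle, uint64_t batch_offset,
			       uint32_t delta)
{
	memset(reloc, 0, sizeof(*reloc));
	reloc->target_handle = target_handle;	/* buffer being pointed at */
	reloc->offset = batch_offset;		/* where in the batch to patch */
	reloc->delta = delta;			/* added to the target's GTT offset */
	reloc->presumed_offset = 0;		/* no presumption; kernel writes back the real one */
	reloc->read_domains = I915_GEM_DOMAIN_VERTEX;
	reloc->write_domain = 0;		/* target is only read */
}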
465
466struct drm_i915_gem_exec_object {
467 /**
468 * User's handle for a buffer to be bound into the GTT for this
469 * operation.
470 */
471 uint32_t handle;
472
473 /** Number of relocations to be performed on this buffer */
474 uint32_t relocation_count;
475 /**
476 * Pointer to array of struct drm_i915_gem_relocation_entry containing
477 * the relocations to be performed in this buffer.
478 */
479 uint64_t relocs_ptr;
480
481 /** Required alignment in graphics aperture */
482 uint64_t alignment;
483
484 /**
485 * Returned value of the updated offset of the object, for future
486 * presumed_offset writes.
487 */
488 uint64_t offset;
489};
490
491struct drm_i915_gem_execbuffer {
492 /**
493 * List of buffers to be validated with their relocations to be
494	 * performed on them.
495 *
496	 * This is a pointer to an array of struct drm_i915_gem_exec_object.
497 *
498 * These buffers must be listed in an order such that all relocations
499 * a buffer is performing refer to buffers that have already appeared
500 * in the validate list.
501 */
502 uint64_t buffers_ptr;
503 uint32_t buffer_count;
504
505 /** Offset in the batchbuffer to start execution from. */
506 uint32_t batch_start_offset;
507 /** Bytes used in batchbuffer from batch_start_offset */
508 uint32_t batch_len;
509 uint32_t DR1;
510 uint32_t DR4;
511 uint32_t num_cliprects;
512 /** This is a struct drm_clip_rect *cliprects */
513 uint64_t cliprects_ptr;
514};
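Tying it together: the buffers (each with its relocation list) go into an array of exec objects, with the batch buffer last since its relocations refer to the earlier entries, and execbuffer validates, relocates and runs the batch. Note that this header adds command numbers for DRM_I915_GEM_INIT and DRM_I915_GEM_EXECBUFFER but no DRM_IOCTL_* wrappers for them, so the sketch below goes through libdrm's drmCommandWrite():

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "i915_drm.h"

static int example_exec(int fd, uint32_t vbo_handle, uint32_t batch_handle,
			struct drm_i915_gem_relocation_entry *relocs,
			uint32_t nrelocs, uint32_t batch_len)
{
	struct drm_i915_gem_exec_object objs[2];
	struct drm_i915_gem_execbuffer execbuf;

	memset(objs, 0, sizeof(objs));
	objs[0].handle = vbo_handle;		/* referenced buffer first */
	objs[1].handle = batch_handle;		/* batch buffer last */
	objs[1].relocation_count = nrelocs;
	objs[1].relocs_ptr = (uint64_t)(uintptr_t)relocs;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uint64_t)(uintptr_t)objs;
	execbuf.buffer_count = 2;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;		/* cliprects left at zero */

	return drmCommandWrite(fd, DRM_I915_GEM_EXECBUFFER,
			       &execbuf, sizeof(execbuf));
}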
515
516struct drm_i915_gem_pin {
517 /** Handle of the buffer to be pinned. */
518 uint32_t handle;
519 uint32_t pad;
520
521 /** alignment required within the aperture */
522 uint64_t alignment;
523
524 /** Returned GTT offset of the buffer. */
525 uint64_t offset;
526};
527
528struct drm_i915_gem_unpin {
529 /** Handle of the buffer to be unpinned. */
530 uint32_t handle;
531 uint32_t pad;
532};
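Pin and unpin are intended for privileged use, such as the X server fixing a scanout or cursor buffer at a stable GTT address; the returned offset is what gets programmed into the hardware. A sketch (alignment 0 leaves placement entirely to the kernel):

#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Pin a buffer into the GTT and report its offset through *offset. */
static int example_gem_pin(int fd, uint32_t handle, uint64_t alignment,
			   uint64_t *offset)
{
	struct drm_i915_gem_pin pin;

	pin.handle = handle;
	pin.pad = 0;
	pin.alignment = alignment;
	pin.offset = 0;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin) != 0)
		return -1;

	*offset = pin.offset;		/* stable GTT address while pinned */
	return 0;
}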
533
534struct drm_i915_gem_busy {
535 /** Handle of the buffer to check for busy */
536 uint32_t handle;
537
538 /** Return busy status (1 if busy, 0 if idle) */
539 uint32_t busy;
540};
541
542#define I915_TILING_NONE 0
543#define I915_TILING_X 1
544#define I915_TILING_Y 2
545
546#define I915_BIT_6_SWIZZLE_NONE 0
547#define I915_BIT_6_SWIZZLE_9 1
548#define I915_BIT_6_SWIZZLE_9_10 2
549#define I915_BIT_6_SWIZZLE_9_11 3
550#define I915_BIT_6_SWIZZLE_9_10_11 4
551/* Not seen by userland */
552#define I915_BIT_6_SWIZZLE_UNKNOWN 5
553
554struct drm_i915_gem_set_tiling {
555 /** Handle of the buffer to have its tiling state updated */
556 uint32_t handle;
557
558 /**
559 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
560 * I915_TILING_Y).
561 *
562 * This value is to be set on request, and will be updated by the
563 * kernel on successful return with the actual chosen tiling layout.
564 *
565 * The tiling mode may be demoted to I915_TILING_NONE when the system
566 * has bit 6 swizzling that can't be managed correctly by GEM.
567 *
568 * Buffer contents become undefined when changing tiling_mode.
569 */
570 uint32_t tiling_mode;
571
572 /**
573 * Stride in bytes for the object when in I915_TILING_X or
574 * I915_TILING_Y.
575 */
576 uint32_t stride;
577
578 /**
579 * Returned address bit 6 swizzling required for CPU access through
580 * mmap mapping.
581 */
582 uint32_t swizzle_mode;
583};
584
585struct drm_i915_gem_get_tiling {
586 /** Handle of the buffer to get tiling state for. */
587 uint32_t handle;
588
589 /**
590 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
591 * I915_TILING_Y).
592 */
593 uint32_t tiling_mode;
594
595 /**
596 * Returned address bit 6 swizzling required for CPU access through
597 * mmap mapping.
598 */
599 uint32_t swizzle_mode;
600};
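set_tiling is a negotiation: userspace requests a mode and stride, and the kernel may demote the mode to I915_TILING_NONE or report a swizzle_mode the CPU must honour, so callers check what actually came back rather than assume the request stuck. A sketch requesting X tiling:

#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Request X tiling; reports the mode and swizzling the kernel settled on. */
static int example_set_x_tiling(int fd, uint32_t handle, uint32_t stride,
				uint32_t *tiling_out, uint32_t *swizzle_out)
{
	struct drm_i915_gem_set_tiling st;

	st.handle = handle;
	st.tiling_mode = I915_TILING_X;		/* the request */
	st.stride = stride;			/* bytes per tiled row */
	st.swizzle_mode = 0;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st) != 0)
		return -1;

	*tiling_out = st.tiling_mode;		/* may have been demoted */
	*swizzle_out = st.swizzle_mode;		/* needed for correct CPU access */
	return 0;
}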
601
270#endif /* _I915_DRM_H_ */ 602#endif /* _I915_DRM_H_ */