author    Linus Torvalds <torvalds@linux-foundation.org>  2008-12-05 00:44:40 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-12-05 00:44:40 -0500
commit    6df944c5f8194a1010f7166bcdd48667637f1af8 (patch)
tree      033c8c9bf599d0759c5fb1b25e9edd4dfa3728ce /drivers
parent    341e55805d671948c2a93d3d6c7e9babb47de5d8 (diff)
parent    0235439232cb6f8a54f8976aa8330c1c98ebad0b (diff)
Merge branch 'drm-gem-update' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-gem-update' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/i915: Return error in i915_gem_set_to_gtt_domain if we're not in the GTT.
  drm/i915: Retry execbuffer pinning after clearing the GTT
  drm/i915: Move the execbuffer domain computations together
  drm/i915: Rename object_set_domain to object_set_to_gpu_domain
  drm/i915: Make a single set-to-cpu-domain path and use it wherever needed.
  drm/i915: Make a single set-to-gtt-domain path.
  drm/i915: If interrupted while setting object domains, still emit the flush.
  drm/i915: Move flushing list cleanup from flush request retire to request emit.
  drm/i915: Respect GM965/GM45 bit-17-instead-of-bit-11 option for swizzling.
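A recurring theme in this series is that the old catch-all i915_gem_object_set_domain() is replaced by single-purpose paths. The following is a condensed sketch of the resulting dispatch, distilled from the i915_gem.c changes below; set_domain_sketch() is a made-up name for illustration, and locking, debug output and reference handling are omitted:

	/* Illustrative sketch only -- the real code is in the diff below. */
	static int set_domain_sketch(struct drm_gem_object *obj,
				     uint32_t read_domains, uint32_t write_domain)
	{
		/* The set_domain ioctl now accepts only CPU and GTT domains,
		 * and a non-zero write domain must equal the read domain.
		 */
		if ((read_domains | write_domain) &
		    ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
			return -EINVAL;
		if (write_domain != 0 && read_domains != write_domain)
			return -EINVAL;

		/* Exactly one single-purpose path does the flushing/waiting. */
		if (read_domains & I915_GEM_DOMAIN_GTT)
			return i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
		return i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}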
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |  15
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 637
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c    |   5
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |   7
 -rw-r--r--  drivers/gpu/drm/i915/i915_reg.h         |   1
 5 files changed, 420 insertions(+), 245 deletions(-)
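The "Retry execbuffer pinning after clearing the GTT" patch turns an aperture-full failure into one more attempt after evicting everything. A rough sketch of that loop, condensed from i915_gem_execbuffer() in the diff below (pin_with_retry_sketch() is a hypothetical name; object lookup, error-path unpinning and the rest of the ioctl are left out):

	/* Illustrative sketch only -- the real loop is in the diff below. */
	static int pin_with_retry_sketch(struct drm_device *dev,
					 struct drm_gem_object **object_list,
					 struct drm_i915_gem_exec_object *exec_list,
					 int buffer_count,
					 struct drm_file *file_priv)
	{
		int pin_tries, pinned, i, ret;

		for (pin_tries = 0; ; pin_tries++) {
			ret = 0;
			pinned = 0;
			for (i = 0; i < buffer_count; i++) {
				ret = i915_gem_object_pin_and_relocate(object_list[i],
								       file_priv,
								       &exec_list[i]);
				if (ret)
					break;
				pinned = i + 1;
			}
			if (ret == 0)
				return 0;	/* everything fit in the GTT */

			/* Anything other than "GTT full", or a second miss: give up. */
			if (ret != -ENOMEM || pin_tries >= 1)
				return ret;

			/* Unpin what we managed, empty the aperture, try once more. */
			for (i = 0; i < pinned; i++)
				i915_gem_object_unpin(object_list[i]);
			ret = i915_gem_evict_everything(dev);
			if (ret)
				return ret;
		}
	}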
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4f39b9a0ec..adc972cc6bfc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -244,6 +244,10 @@ typedef struct drm_i915_private {
 		 * List of objects currently involved in rendering from the
 		 * ringbuffer.
 		 *
+		 * Includes buffers having the contents of their GPU caches
+		 * flushed, not necessarily primitives. last_rendering_seqno
+		 * represents when the rendering involved will be completed.
+		 *
 		 * A reference is held on the buffer while on this list.
 		 */
 		struct list_head active_list;
@@ -253,6 +257,8 @@ typedef struct drm_i915_private {
 		 * still have a write_domain which needs to be flushed before
 		 * unbinding.
 		 *
+		 * last_rendering_seqno is 0 while an object is in this list.
+		 *
 		 * A reference is held on the buffer while on this list.
 		 */
 		struct list_head flushing_list;
@@ -261,6 +267,8 @@ typedef struct drm_i915_private {
 		 * LRU list of objects which are not in the ringbuffer and
 		 * are ready to unbind, but are still in the GTT.
 		 *
+		 * last_rendering_seqno is 0 while an object is in this list.
+		 *
 		 * A reference is not held on the buffer while on this list,
 		 * as merely being GTT-bound shouldn't prevent its being
 		 * freed, and we'll pull it off the list in the free path.
@@ -371,8 +379,8 @@ struct drm_i915_gem_object {
 	uint32_t agp_type;
 
 	/**
-	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
-	 * GEM_DOMAIN_CPU is not in the object's read domain.
+	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
+	 * flags which individual pages are valid.
 	 */
 	uint8_t *page_cpu_valid;
 };
@@ -394,9 +402,6 @@ struct drm_i915_gem_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
-	/** Cache domains that were flushed at the start of the request. */
-	uint32_t flush_domains;
-
 	struct list_head list;
 };
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d58ddef468f8..3fde82be014f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -33,21 +33,21 @@
 
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-			    uint32_t read_domains,
-			    uint32_t write_domain);
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-				 uint64_t offset,
-				 uint64_t size,
-				 uint32_t read_domains,
-				 uint32_t write_domain);
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-		    struct drm_file *file_priv,
-		    uint32_t read_domains,
-		    uint32_t write_domain);
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  uint32_t read_domains,
+				  uint32_t write_domain);
+static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+					     int write);
+static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+					     int write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+						      uint64_t offset,
+						      uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -162,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
-					       I915_GEM_DOMAIN_CPU, 0);
+	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+							args->size);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
@@ -260,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 	if (ret)
 		goto fail;
 
@@ -320,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -397,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
- * Called when user space prepares to use an object
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
  */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -405,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_set_domain *args = data;
 	struct drm_gem_object *obj;
+	uint32_t read_domains = args->read_domains;
+	uint32_t write_domain = args->write_domain;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
+	/* Only handle setting domains to types used by the CPU. */
+	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	/* Having something in the write domain implies it's in the read
+	 * domain, and only that read domain. Enforce that in the request.
+	 */
+	if (write_domain != 0 && read_domains != write_domain)
+		return -EINVAL;
+
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
@@ -417,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 #if WATCH_BUF
 	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
-		 obj, obj->size, args->read_domains, args->write_domain);
+		 obj, obj->size, read_domains, write_domain);
 #endif
-	ret = i915_gem_set_domain(obj, file_priv,
-				  args->read_domains, args->write_domain);
+	if (read_domains & I915_GEM_DOMAIN_GTT) {
+		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+		/* Silently promote "you're not bound, there was nothing to do"
+		 * to success, since the client was just asking us to
+		 * make sure everything was done.
+		 */
+		if (ret == -EINVAL)
+			ret = 0;
+	} else {
+		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+	}
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -455,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	obj_priv = obj->driver_private;
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
-		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
-	}
+	if (obj_priv->pin_count)
+		i915_gem_object_flush_cpu_write_domain(obj);
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -532,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -546,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
 	/* Move from whatever list we were on to the tail of execution. */
 	list_move_tail(&obj_priv->list,
 		       &dev_priv->mm.active_list);
+	obj_priv->last_rendering_seqno = seqno;
 }
 
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	BUG_ON(!obj_priv->active);
+	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+	obj_priv->last_rendering_seqno = 0;
+}
 
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -562,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -610,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
-	request->flush_domains = flush_domains;
 	was_empty = list_empty(&dev_priv->mm.request_list);
 	list_add_tail(&request->list, &dev_priv->mm.request_list);
 
+	/* Associate any objects on the flushing list matching the write
+	 * domain we're flushing with our flush.
+	 */
+	if (flush_domains != 0) {
+		struct drm_i915_gem_object *obj_priv, *next;
+
+		list_for_each_entry_safe(obj_priv, next,
+					 &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			if ((obj->write_domain & flush_domains) ==
+			    obj->write_domain) {
+				obj->write_domain = 0;
+				i915_gem_object_move_to_active(obj, seqno);
+			}
+		}
+
+	}
+
 	if (was_empty && !dev_priv->mm.suspended)
 		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
 	return seqno;
@@ -676,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
 			 __func__, request->seqno, obj);
 #endif
 
-		if (obj->write_domain != 0) {
-			list_move_tail(&obj_priv->list,
-				       &dev_priv->mm.flushing_list);
-		} else {
+		if (obj->write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
 			i915_gem_object_move_to_inactive(obj);
-		}
-	}
-
-	if (request->flush_domains != 0) {
-		struct drm_i915_gem_object *obj_priv, *next;
-
-		/* Clear the write domain and activity from any buffers
-		 * that are just waiting for a flush matching the one retired.
-		 */
-		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
-			struct drm_gem_object *obj = obj_priv->obj;
-
-			if (obj->write_domain & request->flush_domains) {
-				obj->write_domain = 0;
-				i915_gem_object_move_to_inactive(obj);
-			}
-		}
-
 	}
 }
 
@@ -892,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int ret;
 
-	/* If there are writes queued to the buffer, flush and
-	 * create a new seqno to wait for.
+	/* This function only exists to support waiting for existing rendering,
+	 * not for emitting required flushes.
 	 */
-	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
-		uint32_t write_domain = obj->write_domain;
-#if WATCH_BUF
-		DRM_INFO("%s: flushing object %p from write domain %08x\n",
-			 __func__, obj, write_domain);
-#endif
-		i915_gem_flush(dev, 0, write_domain);
-
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = i915_add_request(dev,
-								  write_domain);
-		BUG_ON(obj_priv->last_rendering_seqno == 0);
-#if WATCH_LRU
-		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-#endif
-	}
+	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
 	/* If there is rendering queued on the buffer being evicted, wait for
 	 * it.
@@ -950,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 		return -EINVAL;
 	}
 
-	/* Wait for any rendering to complete
-	 */
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret) {
-		DRM_ERROR("wait_rendering failed: %d\n", ret);
-		return ret;
-	}
-
 	/* Move the object to the CPU domain to ensure that
 	 * any possible CPU writes while it's not in the GTT
 	 * are flushed when we go to remap it. This will
 	 * also ensure that all pending GPU writes are finished
 	 * before we unbind.
 	 */
-	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
-					 I915_GEM_DOMAIN_CPU);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret) {
-		DRM_ERROR("set_domain failed: %d\n", ret);
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("set_domain failed: %d\n", ret);
 		return ret;
 	}
 
@@ -1083,6 +1095,19 @@ i915_gem_evict_something(struct drm_device *dev)
 }
 
 static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+	int ret;
+
+	for (;;) {
+		ret = i915_gem_evict_something(dev);
+		if (ret != 0)
+			break;
+	}
+	return ret;
+}
+
+static int
 i915_gem_object_get_page_list(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1168,7 +1193,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
 		ret = i915_gem_evict_something(dev);
 		if (ret != 0) {
-			DRM_ERROR("Failed to evict a buffer %d\n", ret);
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to evict a buffer %d\n", ret);
 			return ret;
 		}
 		goto search_free;
@@ -1228,6 +1254,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
 }
 
+/** Flushes any GPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	uint32_t seqno;
+
+	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+		return;
+
+	/* Queue the GPU write cache flushing we need. */
+	i915_gem_flush(dev, 0, obj->write_domain);
+	seqno = i915_add_request(dev, obj->write_domain);
+	obj->write_domain = 0;
+	i915_gem_object_move_to_active(obj, seqno);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+{
+	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+		return;
+
+	/* No actual flushing is required for the GTT write domain. Writes
+	 * to it immediately go to main memory as far as we know, so there's
+	 * no chipset flush. It also doesn't land in render cache.
+	 */
+	obj->write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+		return;
+
+	i915_gem_clflush_object(obj);
+	drm_agp_chipset_flush(dev);
+	obj->write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret;
+
+	/* Not valid to be called on unbound objects. */
+	if (obj_priv->gtt_space == NULL)
+		return -EINVAL;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+
+	/* If we're writing through the GTT domain, then CPU and GPU caches
+	 * will need to be invalidated at next use.
+	 */
+	if (write)
+		obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	if (write) {
+		obj->write_domain = I915_GEM_DOMAIN_GTT;
+		obj_priv->dirty = 1;
+	}
+
+	return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_device *dev = obj->dev;
+	int ret;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	/* If we have a partially-valid cache of the object in the CPU,
+	 * finish invalidating it and free the per-page flags.
+	 */
+	i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+	/* Flush the CPU cache if it's still invalid. */
+	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+		i915_gem_clflush_object(obj);
+		drm_agp_chipset_flush(dev);
+
+		obj->read_domains |= I915_GEM_DOMAIN_CPU;
+	}
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+	/* If we're writing through the CPU, then the GPU read domains will
+	 * need to be invalidated at next use.
+	 */
+	if (write) {
+		obj->read_domains &= I915_GEM_DOMAIN_CPU;
+		obj->write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	return 0;
+}
+
 /*
  * Set the next domain for the specified object. This
  * may not actually perform the necessary flushing/invaliding though,
@@ -1339,16 +1502,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
  *		MI_FLUSH
  *		drm_agp_chipset_flush
  */
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-			    uint32_t read_domains,
-			    uint32_t write_domain)
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  uint32_t read_domains,
+				  uint32_t write_domain)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
-	int ret;
+
+	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1385,34 +1550,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
 			 __func__, flush_domains, invalidate_domains);
 #endif
-		/*
-		 * If we're invaliding the CPU cache and flushing a GPU cache,
-		 * then pause for rendering so that the GPU caches will be
-		 * flushed before the cpu cache is invalidated
-		 */
-		if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
-		    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
-				       I915_GEM_DOMAIN_GTT))) {
-			ret = i915_gem_object_wait_rendering(obj);
-			if (ret)
-				return ret;
-		}
 		i915_gem_clflush_object(obj);
 	}
 
 	if ((write_domain | flush_domains) != 0)
 		obj->write_domain = write_domain;
-
-	/* If we're invalidating the CPU domain, clear the per-page CPU
-	 * domain list as well.
-	 */
-	if (obj_priv->page_cpu_valid != NULL &&
-	    (write_domain != 0 ||
-	     read_domains & I915_GEM_DOMAIN_CPU)) {
-		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-			 DRM_MEM_DRIVER);
-		obj_priv->page_cpu_valid = NULL;
-	}
 	obj->read_domains = read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
@@ -1423,47 +1565,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
 			 obj->read_domains, obj->write_domain,
 			 dev->invalidate_domains, dev->flush_domains);
 #endif
-	return 0;
 }
 
 /**
- * Set the read/write domain on a range of the object.
+ * Moves the object from a partially CPU read to a full one.
  *
- * Currently only implemented for CPU reads, otherwise drops to normal
- * i915_gem_object_set_domain().
+ * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
+ * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  */
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-				 uint64_t offset,
-				 uint64_t size,
-				 uint32_t read_domains,
-				 uint32_t write_domain)
+static void
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-	int ret, i;
 
-	if (obj->read_domains & I915_GEM_DOMAIN_CPU)
-		return 0;
+	if (!obj_priv->page_cpu_valid)
+		return;
 
-	if (read_domains != I915_GEM_DOMAIN_CPU ||
-	    write_domain != 0)
-		return i915_gem_object_set_domain(obj,
-						  read_domains, write_domain);
+	/* If we're partially in the CPU read domain, finish moving it in.
+	 */
+	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+		int i;
 
-	/* Wait on any GPU rendering to the object to be flushed. */
+		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+			if (obj_priv->page_cpu_valid[i])
+				continue;
+			drm_clflush_pages(obj_priv->page_list + i, 1);
+		}
+		drm_agp_chipset_flush(dev);
+	}
+
+	/* Free the page_cpu_valid mappings which are now stale, whether
+	 * or not we've got I915_GEM_DOMAIN_CPU.
+	 */
+	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+		 DRM_MEM_DRIVER);
+	obj_priv->page_cpu_valid = NULL;
+}
+
+/**
+ * Set the CPU read domain on a range of the object.
+ *
+ * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
+ * not entirely valid. The page_cpu_valid member of the object flags which
+ * pages have been flushed, and will be respected by
+ * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
+ * of the whole object.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+					  uint64_t offset, uint64_t size)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int i, ret;
+
+	if (offset == 0 && size == obj->size)
+		return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
 	ret = i915_gem_object_wait_rendering(obj);
-	if (ret)
+	if (ret != 0)
 		return ret;
+	i915_gem_object_flush_gtt_write_domain(obj);
 
+	/* If we're already fully in the CPU read domain, we're done. */
+	if (obj_priv->page_cpu_valid == NULL &&
+	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+		return 0;
+
+	/* Otherwise, create/clear the per-page CPU read domain flag if we're
+	 * newly adding I915_GEM_DOMAIN_CPU
+	 */
 	if (obj_priv->page_cpu_valid == NULL) {
 		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
 						      DRM_MEM_DRIVER);
-	}
+		if (obj_priv->page_cpu_valid == NULL)
+			return -ENOMEM;
+	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
 
 	/* Flush the cache on any pages that are still invalid from the CPU's
 	 * perspective.
 	 */
-	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+	     i++) {
 		if (obj_priv->page_cpu_valid[i])
 			continue;
 
@@ -1472,39 +1661,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
 		obj_priv->page_cpu_valid[i] = 1;
 	}
 
-	return 0;
-}
-
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
-	uint32_t flush_domains = dev->flush_domains;
-
-	/*
-	 * Now that all the buffers are synced to the proper domains,
-	 * flush and invalidate the collected domains
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
 	 */
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			 __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev,
-			       dev->invalidate_domains,
-			       dev->flush_domains);
-		dev->invalidate_domains = 0;
-		dev->flush_domains = 0;
-	}
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
-	return flush_domains;
+	obj->read_domains |= I915_GEM_DOMAIN_CPU;
+
+	return 0;
 }
 
 /**
@@ -1585,6 +1749,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			return -EINVAL;
 		}
 
+		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+			DRM_ERROR("reloc with read/write CPU domains: "
+				  "obj %p target %d offset %d "
+				  "read %08x write %08x",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset,
+				  reloc.read_domains,
+				  reloc.write_domain);
+			return -EINVAL;
+		}
+
 		if (reloc.write_domain && target_obj->pending_write_domain &&
 		    reloc.write_domain != target_obj->pending_write_domain) {
 			DRM_ERROR("Write domain conflict: "
@@ -1625,19 +1801,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			continue;
 		}
 
-		/* Now that we're going to actually write some data in,
-		 * make sure that any rendering using this buffer's contents
-		 * is completed.
-		 */
-		i915_gem_object_wait_rendering(obj);
-
-		/* As we're writing through the gtt, flush
-		 * any CPU writes before we write the relocations
-		 */
-		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-			i915_gem_clflush_object(obj);
-			drm_agp_chipset_flush(dev);
-			obj->write_domain = 0;
+		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		if (ret != 0) {
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
 		}
 
 		/* Map the page containing the relocation we're going to
@@ -1779,6 +1947,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	int ret, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains;
+	int pin_tries;
 
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1827,14 +1996,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EBUSY;
 	}
 
-	/* Zero the gloabl flush/invalidate flags. These
-	 * will be modified as each object is bound to the
-	 * gtt
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-
-	/* Look up object handles and perform the relocations */
+	/* Look up object handles */
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
 						       exec_list[i].handle);
@@ -1844,17 +2006,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			ret = -EBADF;
 			goto err;
 		}
+	}
 
-		object_list[i]->pending_read_domains = 0;
-		object_list[i]->pending_write_domain = 0;
-		ret = i915_gem_object_pin_and_relocate(object_list[i],
-						       file_priv,
-						       &exec_list[i]);
-		if (ret) {
-			DRM_ERROR("object bind and relocate failed %d\n", ret);
+	/* Pin and relocate */
+	for (pin_tries = 0; ; pin_tries++) {
+		ret = 0;
+		for (i = 0; i < args->buffer_count; i++) {
+			object_list[i]->pending_read_domains = 0;
+			object_list[i]->pending_write_domain = 0;
+			ret = i915_gem_object_pin_and_relocate(object_list[i],
+							       file_priv,
+							       &exec_list[i]);
+			if (ret)
+				break;
+			pinned = i + 1;
+		}
+		/* success */
+		if (ret == 0)
+			break;
+
+		/* error other than GTT full, or we've already tried again */
+		if (ret != -ENOMEM || pin_tries >= 1) {
+			DRM_ERROR("Failed to pin buffers %d\n", ret);
 			goto err;
 		}
-		pinned = i + 1;
+
+		/* unpin all of our buffers */
+		for (i = 0; i < pinned; i++)
+			i915_gem_object_unpin(object_list[i]);
+
+		/* evict everyone we can from the aperture */
+		ret = i915_gem_evict_everything(dev);
+		if (ret)
+			goto err;
 	}
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
@@ -1864,21 +2048,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
 
-		/* make sure all previous memory operations have passed */
-		ret = i915_gem_object_set_domain(obj,
-						 obj->pending_read_domains,
-						 obj->pending_write_domain);
-		if (ret)
-			goto err;
+		/* Compute new gpu domains and update invalidate/flush */
+		i915_gem_object_set_to_gpu_domain(obj,
+						  obj->pending_read_domains,
+						  obj->pending_write_domain);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Flush/invalidate caches and chipset buffer */
-	flush_domains = i915_gem_dev_set_domain(dev);
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			 __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev,
+			       dev->invalidate_domains,
+			       dev->flush_domains);
+		if (dev->flush_domains)
+			(void)i915_add_request(dev, dev->flush_domains);
+	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -1898,8 +2098,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			   ~0);
 #endif
 
-	(void)i915_add_request(dev, flush_domains);
-
 	/* Exec the batchbuffer */
 	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
 	if (ret) {
@@ -1927,10 +2125,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	i915_file_priv->mm.last_gem_seqno = seqno;
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = seqno;
+		i915_gem_object_move_to_active(obj, seqno);
 #if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -2061,11 +2257,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	/* XXX - flush the CPU caches for pinned objects
 	 * as the X server doesn't manage domains yet
 	 */
-	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
-		obj->write_domain = 0;
-	}
+	i915_gem_object_flush_cpu_write_domain(obj);
 	args->offset = obj_priv->gtt_offset;
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -2167,29 +2359,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
 
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-		    struct drm_file *file_priv,
-		    uint32_t read_domains,
-		    uint32_t write_domain)
-{
-	struct drm_device *dev = obj->dev;
-	int ret;
-	uint32_t flush_domains;
-
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
-	if (ret)
-		return ret;
-	flush_domains = i915_gem_dev_set_domain(obj->dev);
-
-	if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
-		(void) i915_add_request(dev, flush_domains);
-
-	return 0;
-}
-
 /** Unbinds all objects that are on the given buffer list. */
 static int
 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 93de15b4c9a7..e8d5abe1250e 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
 	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
 			    list)
 	{
-		DRM_PROC_PRINT("    %d @ %d %08x\n",
+		DRM_PROC_PRINT("    %d @ %d\n",
 			       gem_request->seqno,
-			       (int) (jiffies - gem_request->emitted_jiffies),
-			       gem_request->flush_domains);
+			       (int) (jiffies - gem_request->emitted_jiffies));
 	}
 	if (len > request + offset)
 		return request;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e8b85ac4ca04..a8cb69469c64 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		    dcc & DCC_CHANNEL_XOR_DISABLE) {
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 			swizzle_y = I915_BIT_6_SWIZZLE_9;
-		} else if (IS_I965GM(dev) || IS_GM45(dev)) {
-			/* GM965 only does bit 11-based channel
-			 * randomization
+		} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+			   (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+			/* GM965/GM45 does either bit 11 or bit 17
+			 * swizzling.
 			 */
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
 			swizzle_y = I915_BIT_6_SWIZZLE_9_11;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e476eba36e6..9d24aaeb8a45 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -522,6 +522,7 @@
 #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED	(2 << 0)
 #define DCC_ADDRESSING_MODE_MASK			(3 << 0)
 #define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17				(1 << 9)
 
 /** 965 MCH register controlling DRAM channel configuration */
 #define C0DRB3			0x10206