Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  722
1 file changed, 454 insertions, 268 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6b4a2bd20640..24fe8c10b4b2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,21 +31,23 @@
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include <linux/swap.h> 32#include <linux/swap.h>
33 33
34static int 34#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
35i915_gem_object_set_domain(struct drm_gem_object *obj, 35
36 uint32_t read_domains, 36static void
37 uint32_t write_domain); 37i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
38static int 38 uint32_t read_domains,
39i915_gem_object_set_domain_range(struct drm_gem_object *obj, 39 uint32_t write_domain);
40 uint64_t offset, 40static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
41 uint64_t size, 41static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42 uint32_t read_domains, 42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
43 uint32_t write_domain); 43static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
44static int 44 int write);
45i915_gem_set_domain(struct drm_gem_object *obj, 45static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
46 struct drm_file *file_priv, 46 int write);
47 uint32_t read_domains, 47static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
48 uint32_t write_domain); 48 uint64_t offset,
49 uint64_t size);
50static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
49static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 51static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
50static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 52static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
51static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 53static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -83,20 +85,14 @@ int
83i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 85i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
84 struct drm_file *file_priv) 86 struct drm_file *file_priv)
85{ 87{
86 drm_i915_private_t *dev_priv = dev->dev_private;
87 struct drm_i915_gem_get_aperture *args = data; 88 struct drm_i915_gem_get_aperture *args = data;
88 struct drm_i915_gem_object *obj_priv;
89 89
90 if (!(dev->driver->driver_features & DRIVER_GEM)) 90 if (!(dev->driver->driver_features & DRIVER_GEM))
91 return -ENODEV; 91 return -ENODEV;
92 92
93 args->aper_size = dev->gtt_total; 93 args->aper_size = dev->gtt_total;
94 args->aper_available_size = args->aper_size; 94 args->aper_available_size = (args->aper_size -
95 95 atomic_read(&dev->pin_memory));
96 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
97 if (obj_priv->pin_count > 0)
98 args->aper_available_size -= obj_priv->obj->size;
99 }
100 96
101 return 0; 97 return 0;
102} 98}
@@ -166,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
166 162
167 mutex_lock(&dev->struct_mutex); 163 mutex_lock(&dev->struct_mutex);
168 164
169 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, 165 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
170 I915_GEM_DOMAIN_CPU, 0); 166 args->size);
171 if (ret != 0) { 167 if (ret != 0) {
172 drm_gem_object_unreference(obj); 168 drm_gem_object_unreference(obj);
173 mutex_unlock(&dev->struct_mutex); 169 mutex_unlock(&dev->struct_mutex);
@@ -264,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
264 mutex_unlock(&dev->struct_mutex); 260 mutex_unlock(&dev->struct_mutex);
265 return ret; 261 return ret;
266 } 262 }
267 ret = i915_gem_set_domain(obj, file_priv, 263 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
268 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
269 if (ret) 264 if (ret)
270 goto fail; 265 goto fail;
271 266
@@ -324,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
324 319
325 mutex_lock(&dev->struct_mutex); 320 mutex_lock(&dev->struct_mutex);
326 321
327 ret = i915_gem_set_domain(obj, file_priv, 322 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
328 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
329 if (ret) { 323 if (ret) {
330 mutex_unlock(&dev->struct_mutex); 324 mutex_unlock(&dev->struct_mutex);
331 return ret; 325 return ret;
@@ -401,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
401} 395}
402 396
403/** 397/**
404 * Called when user space prepares to use an object 398 * Called when user space prepares to use an object with the CPU, either
399 * through the mmap ioctl's mapping or a GTT mapping.
405 */ 400 */
406int 401int
407i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 402i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -409,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
409{ 404{
410 struct drm_i915_gem_set_domain *args = data; 405 struct drm_i915_gem_set_domain *args = data;
411 struct drm_gem_object *obj; 406 struct drm_gem_object *obj;
407 uint32_t read_domains = args->read_domains;
408 uint32_t write_domain = args->write_domain;
412 int ret; 409 int ret;
413 410
414 if (!(dev->driver->driver_features & DRIVER_GEM)) 411 if (!(dev->driver->driver_features & DRIVER_GEM))
415 return -ENODEV; 412 return -ENODEV;
416 413
414 /* Only handle setting domains to types used by the CPU. */
415 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
416 return -EINVAL;
417
418 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
419 return -EINVAL;
420
421 /* Having something in the write domain implies it's in the read
422 * domain, and only that read domain. Enforce that in the request.
423 */
424 if (write_domain != 0 && read_domains != write_domain)
425 return -EINVAL;
426
417 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 427 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
418 if (obj == NULL) 428 if (obj == NULL)
419 return -EBADF; 429 return -EBADF;
@@ -421,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
421 mutex_lock(&dev->struct_mutex); 431 mutex_lock(&dev->struct_mutex);
422#if WATCH_BUF 432#if WATCH_BUF
423 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", 433 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
424 obj, obj->size, args->read_domains, args->write_domain); 434 obj, obj->size, read_domains, write_domain);
425#endif 435#endif
426 ret = i915_gem_set_domain(obj, file_priv, 436 if (read_domains & I915_GEM_DOMAIN_GTT) {
427 args->read_domains, args->write_domain); 437 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
438
439 /* Silently promote "you're not bound, there was nothing to do"
440 * to success, since the client was just asking us to
441 * make sure everything was done.
442 */
443 if (ret == -EINVAL)
444 ret = 0;
445 } else {
446 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
447 }
448
428 drm_gem_object_unreference(obj); 449 drm_gem_object_unreference(obj);
429 mutex_unlock(&dev->struct_mutex); 450 mutex_unlock(&dev->struct_mutex);
430 return ret; 451 return ret;
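Note: the reworked ioctl body only accepts the CPU-visible domains and requires that a non-zero write domain equal the read domains. A minimal user-space sketch of that validation; the flag values below are illustrative stand-ins for the uapi constants, and check_set_domain_args() is a hypothetical helper, not driver code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values; the real flags live in the i915 uapi headers. */
#define GEM_DOMAIN_CPU 0x01
#define GEM_DOMAIN_GTT 0x40

/* Mirrors the checks added at the top of i915_gem_set_domain_ioctl(). */
static int check_set_domain_args(uint32_t read_domains, uint32_t write_domain)
{
	/* Only CPU-visible domains may be requested through this ioctl. */
	if (write_domain & ~(GEM_DOMAIN_CPU | GEM_DOMAIN_GTT))
		return -EINVAL;
	if (read_domains & ~(GEM_DOMAIN_CPU | GEM_DOMAIN_GTT))
		return -EINVAL;
	/* A write domain implies reading from that domain, and only it. */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", check_set_domain_args(GEM_DOMAIN_GTT, GEM_DOMAIN_GTT));  /* 0 */
	printf("%d\n", check_set_domain_args(GEM_DOMAIN_CPU | GEM_DOMAIN_GTT,
					     GEM_DOMAIN_GTT));                  /* -EINVAL */
	return 0;
}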
@@ -459,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
459 obj_priv = obj->driver_private; 480 obj_priv = obj->driver_private;
460 481
461 /* Pinned buffers may be scanout, so flush the cache */ 482 /* Pinned buffers may be scanout, so flush the cache */
462 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { 483 if (obj_priv->pin_count)
463 i915_gem_clflush_object(obj); 484 i915_gem_object_flush_cpu_write_domain(obj);
464 drm_agp_chipset_flush(dev); 485
465 }
466 drm_gem_object_unreference(obj); 486 drm_gem_object_unreference(obj);
467 mutex_unlock(&dev->struct_mutex); 487 mutex_unlock(&dev->struct_mutex);
468 return ret; 488 return ret;
@@ -536,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
536} 556}
537 557
538static void 558static void
539i915_gem_object_move_to_active(struct drm_gem_object *obj) 559i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
540{ 560{
541 struct drm_device *dev = obj->dev; 561 struct drm_device *dev = obj->dev;
542 drm_i915_private_t *dev_priv = dev->dev_private; 562 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -550,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
550 /* Move from whatever list we were on to the tail of execution. */ 570 /* Move from whatever list we were on to the tail of execution. */
551 list_move_tail(&obj_priv->list, 571 list_move_tail(&obj_priv->list,
552 &dev_priv->mm.active_list); 572 &dev_priv->mm.active_list);
573 obj_priv->last_rendering_seqno = seqno;
553} 574}
554 575
576static void
577i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
578{
579 struct drm_device *dev = obj->dev;
580 drm_i915_private_t *dev_priv = dev->dev_private;
581 struct drm_i915_gem_object *obj_priv = obj->driver_private;
582
583 BUG_ON(!obj_priv->active);
584 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
585 obj_priv->last_rendering_seqno = 0;
586}
555 587
556static void 588static void
557i915_gem_object_move_to_inactive(struct drm_gem_object *obj) 589i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -566,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
566 else 598 else
567 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 599 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
568 600
601 obj_priv->last_rendering_seqno = 0;
569 if (obj_priv->active) { 602 if (obj_priv->active) {
570 obj_priv->active = 0; 603 obj_priv->active = 0;
571 drm_gem_object_unreference(obj); 604 drm_gem_object_unreference(obj);
@@ -614,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
614 647
615 request->seqno = seqno; 648 request->seqno = seqno;
616 request->emitted_jiffies = jiffies; 649 request->emitted_jiffies = jiffies;
617 request->flush_domains = flush_domains;
618 was_empty = list_empty(&dev_priv->mm.request_list); 650 was_empty = list_empty(&dev_priv->mm.request_list);
619 list_add_tail(&request->list, &dev_priv->mm.request_list); 651 list_add_tail(&request->list, &dev_priv->mm.request_list);
620 652
653 /* Associate any objects on the flushing list matching the write
654 * domain we're flushing with our flush.
655 */
656 if (flush_domains != 0) {
657 struct drm_i915_gem_object *obj_priv, *next;
658
659 list_for_each_entry_safe(obj_priv, next,
660 &dev_priv->mm.flushing_list, list) {
661 struct drm_gem_object *obj = obj_priv->obj;
662
663 if ((obj->write_domain & flush_domains) ==
664 obj->write_domain) {
665 obj->write_domain = 0;
666 i915_gem_object_move_to_active(obj, seqno);
667 }
668 }
669
670 }
671
621 if (was_empty && !dev_priv->mm.suspended) 672 if (was_empty && !dev_priv->mm.suspended)
622 schedule_delayed_work(&dev_priv->mm.retire_work, HZ); 673 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
623 return seqno; 674 return seqno;
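Note: flush bookkeeping now happens when the request is emitted rather than when it retires. Any object on the flushing list whose pending write domain is fully covered by this flush is stamped with the new seqno and moved back to active. A condensed user-space model of that loop; the object type and "list" here are simplified stand-ins, not the driver's structures.

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_RENDER 0x02   /* illustrative GPU write-domain bit */

struct obj {
	uint32_t write_domain;
	uint32_t last_rendering_seqno;   /* 0 while parked on the flushing list */
	int on_flushing_list;
};

/* Mirrors the loop added to i915_add_request(): objects whose pending GPU
 * write is fully covered by this flush now retire with the flush's seqno. */
static void associate_flush(struct obj *objs, int n,
			    uint32_t flush_domains, uint32_t seqno)
{
	for (int i = 0; i < n; i++) {
		if (!objs[i].on_flushing_list)
			continue;
		if ((objs[i].write_domain & flush_domains) == objs[i].write_domain) {
			objs[i].write_domain = 0;
			objs[i].last_rendering_seqno = seqno;  /* "move to active" */
			objs[i].on_flushing_list = 0;
		}
	}
}

int main(void)
{
	struct obj o = { .write_domain = DOMAIN_RENDER, .on_flushing_list = 1 };
	associate_flush(&o, 1, DOMAIN_RENDER, 42);
	printf("seqno=%u flushing=%d\n", (unsigned)o.last_rendering_seqno,
	       o.on_flushing_list);
	return 0;
}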
@@ -680,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
680 __func__, request->seqno, obj); 731 __func__, request->seqno, obj);
681#endif 732#endif
682 733
683 if (obj->write_domain != 0) { 734 if (obj->write_domain != 0)
684 list_move_tail(&obj_priv->list, 735 i915_gem_object_move_to_flushing(obj);
685 &dev_priv->mm.flushing_list); 736 else
686 } else {
687 i915_gem_object_move_to_inactive(obj); 737 i915_gem_object_move_to_inactive(obj);
688 }
689 }
690
691 if (request->flush_domains != 0) {
692 struct drm_i915_gem_object *obj_priv, *next;
693
694 /* Clear the write domain and activity from any buffers
695 * that are just waiting for a flush matching the one retired.
696 */
697 list_for_each_entry_safe(obj_priv, next,
698 &dev_priv->mm.flushing_list, list) {
699 struct drm_gem_object *obj = obj_priv->obj;
700
701 if (obj->write_domain & request->flush_domains) {
702 obj->write_domain = 0;
703 i915_gem_object_move_to_inactive(obj);
704 }
705 }
706
707 } 738 }
708} 739}
709 740
@@ -896,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
896 struct drm_i915_gem_object *obj_priv = obj->driver_private; 927 struct drm_i915_gem_object *obj_priv = obj->driver_private;
897 int ret; 928 int ret;
898 929
899 /* If there are writes queued to the buffer, flush and 930 /* This function only exists to support waiting for existing rendering,
900 * create a new seqno to wait for. 931 * not for emitting required flushes.
901 */ 932 */
902 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { 933 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
903 uint32_t write_domain = obj->write_domain;
904#if WATCH_BUF
905 DRM_INFO("%s: flushing object %p from write domain %08x\n",
906 __func__, obj, write_domain);
907#endif
908 i915_gem_flush(dev, 0, write_domain);
909
910 i915_gem_object_move_to_active(obj);
911 obj_priv->last_rendering_seqno = i915_add_request(dev,
912 write_domain);
913 BUG_ON(obj_priv->last_rendering_seqno == 0);
914#if WATCH_LRU
915 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
916#endif
917 }
918 934
919 /* If there is rendering queued on the buffer being evicted, wait for 935 /* If there is rendering queued on the buffer being evicted, wait for
920 * it. 936 * it.
@@ -954,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
954 return -EINVAL; 970 return -EINVAL;
955 } 971 }
956 972
957 /* Wait for any rendering to complete
958 */
959 ret = i915_gem_object_wait_rendering(obj);
960 if (ret) {
961 DRM_ERROR("wait_rendering failed: %d\n", ret);
962 return ret;
963 }
964
965 /* Move the object to the CPU domain to ensure that 973 /* Move the object to the CPU domain to ensure that
966 * any possible CPU writes while it's not in the GTT 974 * any possible CPU writes while it's not in the GTT
967 * are flushed when we go to remap it. This will 975 * are flushed when we go to remap it. This will
968 * also ensure that all pending GPU writes are finished 976 * also ensure that all pending GPU writes are finished
969 * before we unbind. 977 * before we unbind.
970 */ 978 */
971 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, 979 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
972 I915_GEM_DOMAIN_CPU);
973 if (ret) { 980 if (ret) {
974 DRM_ERROR("set_domain failed: %d\n", ret); 981 if (ret != -ERESTARTSYS)
982 DRM_ERROR("set_domain failed: %d\n", ret);
975 return ret; 983 return ret;
976 } 984 }
977 985
@@ -1087,6 +1095,21 @@ i915_gem_evict_something(struct drm_device *dev)
1087} 1095}
1088 1096
1089static int 1097static int
1098i915_gem_evict_everything(struct drm_device *dev)
1099{
1100 int ret;
1101
1102 for (;;) {
1103 ret = i915_gem_evict_something(dev);
1104 if (ret != 0)
1105 break;
1106 }
1107 if (ret == -ENOMEM)
1108 return 0;
1109 return ret;
1110}
1111
1112static int
1090i915_gem_object_get_page_list(struct drm_gem_object *obj) 1113i915_gem_object_get_page_list(struct drm_gem_object *obj)
1091{ 1114{
1092 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1115 struct drm_i915_gem_object *obj_priv = obj->driver_private;
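Note: i915_gem_evict_everything() keeps calling i915_gem_evict_something() until it fails; running out of evictable objects (-ENOMEM) is the expected terminating case and is reported as success. A standalone sketch of that loop shape with a stubbed evictor; the names are placeholders.

#include <errno.h>
#include <stdio.h>

static int objects_left = 3;

/* Stub standing in for i915_gem_evict_something(): evicts one object,
 * or returns -ENOMEM once nothing evictable remains. */
static int evict_something(void)
{
	if (objects_left == 0)
		return -ENOMEM;
	objects_left--;
	return 0;
}

static int evict_everything(void)
{
	int ret;

	for (;;) {
		ret = evict_something();
		if (ret != 0)
			break;
	}
	/* An empty aperture is the goal, so -ENOMEM here means success. */
	return ret == -ENOMEM ? 0 : ret;
}

int main(void)
{
	printf("evict_everything() = %d, left = %d\n",
	       evict_everything(), objects_left);
	return 0;
}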
@@ -1172,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1172 1195
1173 ret = i915_gem_evict_something(dev); 1196 ret = i915_gem_evict_something(dev);
1174 if (ret != 0) { 1197 if (ret != 0) {
1175 DRM_ERROR("Failed to evict a buffer %d\n", ret); 1198 if (ret != -ERESTARTSYS)
1199 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1176 return ret; 1200 return ret;
1177 } 1201 }
1178 goto search_free; 1202 goto search_free;
@@ -1232,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1232 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 1256 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1233} 1257}
1234 1258
1259/** Flushes any GPU write domain for the object if it's dirty. */
1260static void
1261i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
1262{
1263 struct drm_device *dev = obj->dev;
1264 uint32_t seqno;
1265
1266 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
1267 return;
1268
1269 /* Queue the GPU write cache flushing we need. */
1270 i915_gem_flush(dev, 0, obj->write_domain);
1271 seqno = i915_add_request(dev, obj->write_domain);
1272 obj->write_domain = 0;
1273 i915_gem_object_move_to_active(obj, seqno);
1274}
1275
1276/** Flushes the GTT write domain for the object if it's dirty. */
1277static void
1278i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
1279{
1280 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
1281 return;
1282
1283 /* No actual flushing is required for the GTT write domain. Writes
1284 * to it immediately go to main memory as far as we know, so there's
1285 * no chipset flush. It also doesn't land in render cache.
1286 */
1287 obj->write_domain = 0;
1288}
1289
1290/** Flushes the CPU write domain for the object if it's dirty. */
1291static void
1292i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
1293{
1294 struct drm_device *dev = obj->dev;
1295
1296 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
1297 return;
1298
1299 i915_gem_clflush_object(obj);
1300 drm_agp_chipset_flush(dev);
1301 obj->write_domain = 0;
1302}
1303
1304/**
1305 * Moves a single object to the GTT read, and possibly write domain.
1306 *
1307 * This function returns when the move is complete, including waiting on
1308 * flushes to occur.
1309 */
1310static int
1311i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1312{
1313 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1314 int ret;
1315
1316 /* Not valid to be called on unbound objects. */
1317 if (obj_priv->gtt_space == NULL)
1318 return -EINVAL;
1319
1320 i915_gem_object_flush_gpu_write_domain(obj);
1321 /* Wait on any GPU rendering and flushing to occur. */
1322 ret = i915_gem_object_wait_rendering(obj);
1323 if (ret != 0)
1324 return ret;
1325
1326 /* If we're writing through the GTT domain, then CPU and GPU caches
1327 * will need to be invalidated at next use.
1328 */
1329 if (write)
1330 obj->read_domains &= I915_GEM_DOMAIN_GTT;
1331
1332 i915_gem_object_flush_cpu_write_domain(obj);
1333
1334 /* It should now be out of any other write domains, and we can update
1335 * the domain values for our changes.
1336 */
1337 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
1338 obj->read_domains |= I915_GEM_DOMAIN_GTT;
1339 if (write) {
1340 obj->write_domain = I915_GEM_DOMAIN_GTT;
1341 obj_priv->dirty = 1;
1342 }
1343
1344 return 0;
1345}
1346
1347/**
1348 * Moves a single object to the CPU read, and possibly write domain.
1349 *
1350 * This function returns when the move is complete, including waiting on
1351 * flushes to occur.
1352 */
1353static int
1354i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1355{
1356 struct drm_device *dev = obj->dev;
1357 int ret;
1358
1359 i915_gem_object_flush_gpu_write_domain(obj);
1360 /* Wait on any GPU rendering and flushing to occur. */
1361 ret = i915_gem_object_wait_rendering(obj);
1362 if (ret != 0)
1363 return ret;
1364
1365 i915_gem_object_flush_gtt_write_domain(obj);
1366
1367 /* If we have a partially-valid cache of the object in the CPU,
1368 * finish invalidating it and free the per-page flags.
1369 */
1370 i915_gem_object_set_to_full_cpu_read_domain(obj);
1371
1372 /* Flush the CPU cache if it's still invalid. */
1373 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1374 i915_gem_clflush_object(obj);
1375 drm_agp_chipset_flush(dev);
1376
1377 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1378 }
1379
1380 /* It should now be out of any other write domains, and we can update
1381 * the domain values for our changes.
1382 */
1383 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1384
1385 /* If we're writing through the CPU, then the GPU read domains will
1386 * need to be invalidated at next use.
1387 */
1388 if (write) {
1389 obj->read_domains &= I915_GEM_DOMAIN_CPU;
1390 obj->write_domain = I915_GEM_DOMAIN_CPU;
1391 }
1392
1393 return 0;
1394}
1395
1235/* 1396/*
1236 * Set the next domain for the specified object. This 1397 * Set the next domain for the specified object. This
1237 * may not actually perform the necessary flushing/invaliding though, 1398 * may not actually perform the necessary flushing/invaliding though,
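Note: the new set_to_gtt_domain()/set_to_cpu_domain() helpers share one ordering: flush any pending GPU write domain, wait for rendering, flush the other CPU-visible write domain, then update read_domains/write_domain, with a write request leaving that domain as the only read domain. A small user-space model of just that bitmask discipline; the object type is simplified and the domain values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define DOM_CPU 0x01
#define DOM_GTT 0x40   /* illustrative values, as in the uapi headers */

struct obj {
	uint32_t read_domains;
	uint32_t write_domain;
};

/* Models the tail of i915_gem_object_set_to_gtt_domain(): after flushing and
 * waiting, the object reads from GTT and, if writing, reads only from GTT. */
static void set_to_gtt_domain(struct obj *o, int write)
{
	if (write)
		o->read_domains &= DOM_GTT;   /* other caches become invalid */
	o->read_domains |= DOM_GTT;
	if (write)
		o->write_domain = DOM_GTT;
}

int main(void)
{
	struct obj o = { .read_domains = DOM_CPU, .write_domain = DOM_CPU };
	set_to_gtt_domain(&o, 1);
	printf("read=%#x write=%#x\n", (unsigned)o.read_domains,
	       (unsigned)o.write_domain);   /* read=0x40 write=0x40 */
	return 0;
}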
@@ -1343,16 +1504,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1343 * MI_FLUSH 1504 * MI_FLUSH
1344 * drm_agp_chipset_flush 1505 * drm_agp_chipset_flush
1345 */ 1506 */
1346static int 1507static void
1347i915_gem_object_set_domain(struct drm_gem_object *obj, 1508i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
1348 uint32_t read_domains, 1509 uint32_t read_domains,
1349 uint32_t write_domain) 1510 uint32_t write_domain)
1350{ 1511{
1351 struct drm_device *dev = obj->dev; 1512 struct drm_device *dev = obj->dev;
1352 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1513 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1353 uint32_t invalidate_domains = 0; 1514 uint32_t invalidate_domains = 0;
1354 uint32_t flush_domains = 0; 1515 uint32_t flush_domains = 0;
1355 int ret; 1516
1517 BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
1518 BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
1356 1519
1357#if WATCH_BUF 1520#if WATCH_BUF
1358 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", 1521 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1389,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
1389 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", 1552 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1390 __func__, flush_domains, invalidate_domains); 1553 __func__, flush_domains, invalidate_domains);
1391#endif 1554#endif
1392 /*
1393 * If we're invaliding the CPU cache and flushing a GPU cache,
1394 * then pause for rendering so that the GPU caches will be
1395 * flushed before the cpu cache is invalidated
1396 */
1397 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1398 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1399 I915_GEM_DOMAIN_GTT))) {
1400 ret = i915_gem_object_wait_rendering(obj);
1401 if (ret)
1402 return ret;
1403 }
1404 i915_gem_clflush_object(obj); 1555 i915_gem_clflush_object(obj);
1405 } 1556 }
1406 1557
1407 if ((write_domain | flush_domains) != 0) 1558 if ((write_domain | flush_domains) != 0)
1408 obj->write_domain = write_domain; 1559 obj->write_domain = write_domain;
1409
1410 /* If we're invalidating the CPU domain, clear the per-page CPU
1411 * domain list as well.
1412 */
1413 if (obj_priv->page_cpu_valid != NULL &&
1414 (write_domain != 0 ||
1415 read_domains & I915_GEM_DOMAIN_CPU)) {
1416 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1417 DRM_MEM_DRIVER);
1418 obj_priv->page_cpu_valid = NULL;
1419 }
1420 obj->read_domains = read_domains; 1560 obj->read_domains = read_domains;
1421 1561
1422 dev->invalidate_domains |= invalidate_domains; 1562 dev->invalidate_domains |= invalidate_domains;
@@ -1427,47 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
1427 obj->read_domains, obj->write_domain, 1567 obj->read_domains, obj->write_domain,
1428 dev->invalidate_domains, dev->flush_domains); 1568 dev->invalidate_domains, dev->flush_domains);
1429#endif 1569#endif
1430 return 0;
1431} 1570}
1432 1571
1433/** 1572/**
1434 * Set the read/write domain on a range of the object. 1573 * Moves the object from a partially CPU read to a full one.
1435 * 1574 *
1436 * Currently only implemented for CPU reads, otherwise drops to normal 1575 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
1437 * i915_gem_object_set_domain(). 1576 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
1438 */ 1577 */
1439static int 1578static void
1440i915_gem_object_set_domain_range(struct drm_gem_object *obj, 1579i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
1441 uint64_t offset,
1442 uint64_t size,
1443 uint32_t read_domains,
1444 uint32_t write_domain)
1445{ 1580{
1581 struct drm_device *dev = obj->dev;
1446 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1582 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1447 int ret, i;
1448 1583
1449 if (obj->read_domains & I915_GEM_DOMAIN_CPU) 1584 if (!obj_priv->page_cpu_valid)
1450 return 0; 1585 return;
1451 1586
1452 if (read_domains != I915_GEM_DOMAIN_CPU || 1587 /* If we're partially in the CPU read domain, finish moving it in.
1453 write_domain != 0) 1588 */
1454 return i915_gem_object_set_domain(obj, 1589 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
1455 read_domains, write_domain); 1590 int i;
1456 1591
1457 /* Wait on any GPU rendering to the object to be flushed. */ 1592 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
1593 if (obj_priv->page_cpu_valid[i])
1594 continue;
1595 drm_clflush_pages(obj_priv->page_list + i, 1);
1596 }
1597 drm_agp_chipset_flush(dev);
1598 }
1599
1600 /* Free the page_cpu_valid mappings which are now stale, whether
1601 * or not we've got I915_GEM_DOMAIN_CPU.
1602 */
1603 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1604 DRM_MEM_DRIVER);
1605 obj_priv->page_cpu_valid = NULL;
1606}
1607
1608/**
1609 * Set the CPU read domain on a range of the object.
1610 *
1611 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
1612 * not entirely valid. The page_cpu_valid member of the object flags which
1613 * pages have been flushed, and will be respected by
1614 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
1615 * of the whole object.
1616 *
1617 * This function returns when the move is complete, including waiting on
1618 * flushes to occur.
1619 */
1620static int
1621i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
1622 uint64_t offset, uint64_t size)
1623{
1624 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1625 int i, ret;
1626
1627 if (offset == 0 && size == obj->size)
1628 return i915_gem_object_set_to_cpu_domain(obj, 0);
1629
1630 i915_gem_object_flush_gpu_write_domain(obj);
1631 /* Wait on any GPU rendering and flushing to occur. */
1458 ret = i915_gem_object_wait_rendering(obj); 1632 ret = i915_gem_object_wait_rendering(obj);
1459 if (ret) 1633 if (ret != 0)
1460 return ret; 1634 return ret;
1635 i915_gem_object_flush_gtt_write_domain(obj);
1461 1636
1637 /* If we're already fully in the CPU read domain, we're done. */
1638 if (obj_priv->page_cpu_valid == NULL &&
1639 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
1640 return 0;
1641
1642 /* Otherwise, create/clear the per-page CPU read domain flag if we're
1643 * newly adding I915_GEM_DOMAIN_CPU
1644 */
1462 if (obj_priv->page_cpu_valid == NULL) { 1645 if (obj_priv->page_cpu_valid == NULL) {
1463 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, 1646 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1464 DRM_MEM_DRIVER); 1647 DRM_MEM_DRIVER);
1465 } 1648 if (obj_priv->page_cpu_valid == NULL)
1649 return -ENOMEM;
1650 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
1651 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
1466 1652
1467 /* Flush the cache on any pages that are still invalid from the CPU's 1653 /* Flush the cache on any pages that are still invalid from the CPU's
1468 * perspective. 1654 * perspective.
1469 */ 1655 */
1470 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { 1656 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
1657 i++) {
1471 if (obj_priv->page_cpu_valid[i]) 1658 if (obj_priv->page_cpu_valid[i])
1472 continue; 1659 continue;
1473 1660
@@ -1476,39 +1663,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1476 obj_priv->page_cpu_valid[i] = 1; 1663 obj_priv->page_cpu_valid[i] = 1;
1477 } 1664 }
1478 1665
1479 return 0; 1666 /* It should now be out of any other write domains, and we can update
1480} 1667 * the domain values for our changes.
1481
1482/**
1483 * Once all of the objects have been set in the proper domain,
1484 * perform the necessary flush and invalidate operations.
1485 *
1486 * Returns the write domains flushed, for use in flush tracking.
1487 */
1488static uint32_t
1489i915_gem_dev_set_domain(struct drm_device *dev)
1490{
1491 uint32_t flush_domains = dev->flush_domains;
1492
1493 /*
1494 * Now that all the buffers are synced to the proper domains,
1495 * flush and invalidate the collected domains
1496 */ 1668 */
1497 if (dev->invalidate_domains | dev->flush_domains) { 1669 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1498#if WATCH_EXEC
1499 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1500 __func__,
1501 dev->invalidate_domains,
1502 dev->flush_domains);
1503#endif
1504 i915_gem_flush(dev,
1505 dev->invalidate_domains,
1506 dev->flush_domains);
1507 dev->invalidate_domains = 0;
1508 dev->flush_domains = 0;
1509 }
1510 1670
1511 return flush_domains; 1671 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1672
1673 return 0;
1512} 1674}
1513 1675
1514/** 1676/**
@@ -1589,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1589 return -EINVAL; 1751 return -EINVAL;
1590 } 1752 }
1591 1753
1754 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
1755 reloc.read_domains & I915_GEM_DOMAIN_CPU) {
1756 DRM_ERROR("reloc with read/write CPU domains: "
1757 "obj %p target %d offset %d "
1758 "read %08x write %08x",
1759 obj, reloc.target_handle,
1760 (int) reloc.offset,
1761 reloc.read_domains,
1762 reloc.write_domain);
1763 return -EINVAL;
1764 }
1765
1592 if (reloc.write_domain && target_obj->pending_write_domain && 1766 if (reloc.write_domain && target_obj->pending_write_domain &&
1593 reloc.write_domain != target_obj->pending_write_domain) { 1767 reloc.write_domain != target_obj->pending_write_domain) {
1594 DRM_ERROR("Write domain conflict: " 1768 DRM_ERROR("Write domain conflict: "
@@ -1629,19 +1803,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1629 continue; 1803 continue;
1630 } 1804 }
1631 1805
1632 /* Now that we're going to actually write some data in, 1806 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1633 * make sure that any rendering using this buffer's contents 1807 if (ret != 0) {
1634 * is completed. 1808 drm_gem_object_unreference(target_obj);
1635 */ 1809 i915_gem_object_unpin(obj);
1636 i915_gem_object_wait_rendering(obj); 1810 return -EINVAL;
1637
1638 /* As we're writing through the gtt, flush
1639 * any CPU writes before we write the relocations
1640 */
1641 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1642 i915_gem_clflush_object(obj);
1643 drm_agp_chipset_flush(dev);
1644 obj->write_domain = 0;
1645 } 1811 }
1646 1812
1647 /* Map the page containing the relocation we're going to 1813 /* Map the page containing the relocation we're going to
@@ -1783,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1783 int ret, i, pinned = 0; 1949 int ret, i, pinned = 0;
1784 uint64_t exec_offset; 1950 uint64_t exec_offset;
1785 uint32_t seqno, flush_domains; 1951 uint32_t seqno, flush_domains;
1952 int pin_tries;
1786 1953
1787#if WATCH_EXEC 1954#if WATCH_EXEC
1788 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 1955 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1831,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1831 return -EBUSY; 1998 return -EBUSY;
1832 } 1999 }
1833 2000
1834 /* Zero the gloabl flush/invalidate flags. These 2001 /* Look up object handles */
1835 * will be modified as each object is bound to the
1836 * gtt
1837 */
1838 dev->invalidate_domains = 0;
1839 dev->flush_domains = 0;
1840
1841 /* Look up object handles and perform the relocations */
1842 for (i = 0; i < args->buffer_count; i++) { 2002 for (i = 0; i < args->buffer_count; i++) {
1843 object_list[i] = drm_gem_object_lookup(dev, file_priv, 2003 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1844 exec_list[i].handle); 2004 exec_list[i].handle);
@@ -1848,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1848 ret = -EBADF; 2008 ret = -EBADF;
1849 goto err; 2009 goto err;
1850 } 2010 }
2011 }
1851 2012
1852 object_list[i]->pending_read_domains = 0; 2013 /* Pin and relocate */
1853 object_list[i]->pending_write_domain = 0; 2014 for (pin_tries = 0; ; pin_tries++) {
1854 ret = i915_gem_object_pin_and_relocate(object_list[i], 2015 ret = 0;
1855 file_priv, 2016 for (i = 0; i < args->buffer_count; i++) {
1856 &exec_list[i]); 2017 object_list[i]->pending_read_domains = 0;
1857 if (ret) { 2018 object_list[i]->pending_write_domain = 0;
1858 DRM_ERROR("object bind and relocate failed %d\n", ret); 2019 ret = i915_gem_object_pin_and_relocate(object_list[i],
2020 file_priv,
2021 &exec_list[i]);
2022 if (ret)
2023 break;
2024 pinned = i + 1;
2025 }
2026 /* success */
2027 if (ret == 0)
2028 break;
2029
2030 /* error other than GTT full, or we've already tried again */
2031 if (ret != -ENOMEM || pin_tries >= 1) {
2032 DRM_ERROR("Failed to pin buffers %d\n", ret);
1859 goto err; 2033 goto err;
1860 } 2034 }
1861 pinned = i + 1; 2035
2036 /* unpin all of our buffers */
2037 for (i = 0; i < pinned; i++)
2038 i915_gem_object_unpin(object_list[i]);
2039
2040 /* evict everyone we can from the aperture */
2041 ret = i915_gem_evict_everything(dev);
2042 if (ret)
2043 goto err;
1862 } 2044 }
1863 2045
1864 /* Set the pending read domains for the batch buffer to COMMAND */ 2046 /* Set the pending read domains for the batch buffer to COMMAND */
@@ -1868,32 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1868 2050
1869 i915_verify_inactive(dev, __FILE__, __LINE__); 2051 i915_verify_inactive(dev, __FILE__, __LINE__);
1870 2052
2053 /* Zero the global flush/invalidate flags. These
2054 * will be modified as new domains are computed
2055 * for each object
2056 */
2057 dev->invalidate_domains = 0;
2058 dev->flush_domains = 0;
2059
1871 for (i = 0; i < args->buffer_count; i++) { 2060 for (i = 0; i < args->buffer_count; i++) {
1872 struct drm_gem_object *obj = object_list[i]; 2061 struct drm_gem_object *obj = object_list[i];
1873 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1874 2062
1875 if (obj_priv->gtt_space == NULL) { 2063 /* Compute new gpu domains and update invalidate/flush */
1876 /* We evicted the buffer in the process of validating 2064 i915_gem_object_set_to_gpu_domain(obj,
1877 * our set of buffers in. We could try to recover by 2065 obj->pending_read_domains,
1878 * kicking them everything out and trying again from 2066 obj->pending_write_domain);
1879 * the start.
1880 */
1881 ret = -ENOMEM;
1882 goto err;
1883 }
1884
1885 /* make sure all previous memory operations have passed */
1886 ret = i915_gem_object_set_domain(obj,
1887 obj->pending_read_domains,
1888 obj->pending_write_domain);
1889 if (ret)
1890 goto err;
1891 } 2067 }
1892 2068
1893 i915_verify_inactive(dev, __FILE__, __LINE__); 2069 i915_verify_inactive(dev, __FILE__, __LINE__);
1894 2070
1895 /* Flush/invalidate caches and chipset buffer */ 2071 if (dev->invalidate_domains | dev->flush_domains) {
1896 flush_domains = i915_gem_dev_set_domain(dev); 2072#if WATCH_EXEC
2073 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
2074 __func__,
2075 dev->invalidate_domains,
2076 dev->flush_domains);
2077#endif
2078 i915_gem_flush(dev,
2079 dev->invalidate_domains,
2080 dev->flush_domains);
2081 if (dev->flush_domains)
2082 (void)i915_add_request(dev, dev->flush_domains);
2083 }
1897 2084
1898 i915_verify_inactive(dev, __FILE__, __LINE__); 2085 i915_verify_inactive(dev, __FILE__, __LINE__);
1899 2086
@@ -1913,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1913 ~0); 2100 ~0);
1914#endif 2101#endif
1915 2102
1916 (void)i915_add_request(dev, flush_domains);
1917
1918 /* Exec the batchbuffer */ 2103 /* Exec the batchbuffer */
1919 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 2104 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1920 if (ret) { 2105 if (ret) {
@@ -1942,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1942 i915_file_priv->mm.last_gem_seqno = seqno; 2127 i915_file_priv->mm.last_gem_seqno = seqno;
1943 for (i = 0; i < args->buffer_count; i++) { 2128 for (i = 0; i < args->buffer_count; i++) {
1944 struct drm_gem_object *obj = object_list[i]; 2129 struct drm_gem_object *obj = object_list[i];
1945 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1946 2130
1947 i915_gem_object_move_to_active(obj); 2131 i915_gem_object_move_to_active(obj, seqno);
1948 obj_priv->last_rendering_seqno = seqno;
1949#if WATCH_LRU 2132#if WATCH_LRU
1950 DRM_INFO("%s: move to exec list %p\n", __func__, obj); 2133 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1951#endif 2134#endif
@@ -2076,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2076 /* XXX - flush the CPU caches for pinned objects 2259 /* XXX - flush the CPU caches for pinned objects
2077 * as the X server doesn't manage domains yet 2260 * as the X server doesn't manage domains yet
2078 */ 2261 */
2079 if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 2262 i915_gem_object_flush_cpu_write_domain(obj);
2080 i915_gem_clflush_object(obj);
2081 drm_agp_chipset_flush(dev);
2082 obj->write_domain = 0;
2083 }
2084 args->offset = obj_priv->gtt_offset; 2263 args->offset = obj_priv->gtt_offset;
2085 drm_gem_object_unreference(obj); 2264 drm_gem_object_unreference(obj);
2086 mutex_unlock(&dev->struct_mutex); 2265 mutex_unlock(&dev->struct_mutex);
@@ -2130,7 +2309,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2130 } 2309 }
2131 2310
2132 obj_priv = obj->driver_private; 2311 obj_priv = obj->driver_private;
2133 args->busy = obj_priv->active; 2312 /* Don't count being on the flushing list against the object being
2313 * done. Otherwise, a buffer left on the flushing list but not getting
2314 * flushed (because nobody's flushing that domain) won't ever return
2315 * unbusy and get reused by libdrm's bo cache. The other expected
2316 * consumer of this interface, OpenGL's occlusion queries, also specs
2317 * that the objects get unbusy "eventually" without any interference.
2318 */
2319 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
2134 2320
2135 drm_gem_object_unreference(obj); 2321 drm_gem_object_unreference(obj);
2136 mutex_unlock(&dev->struct_mutex); 2322 mutex_unlock(&dev->struct_mutex);
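Note: the busy ioctl now treats "active but only waiting for a flush nobody has requested" as idle, so libdrm's buffer cache can recycle such objects. The predicate reduces to active plus a non-zero request seqno; a tiny sketch with a simplified object type (field names as in the patch).

#include <stdint.h>
#include <stdio.h>

struct obj { int active; uint32_t last_rendering_seqno; };

/* An object parked on the flushing list has active == 1 but seqno == 0,
 * and is reported as idle so userspace caches can reuse it. */
static int gem_busy(const struct obj *o)
{
	return o->active && o->last_rendering_seqno != 0;
}

int main(void)
{
	struct obj flushing  = { .active = 1, .last_rendering_seqno = 0 };
	struct obj rendering = { .active = 1, .last_rendering_seqno = 7 };
	printf("%d %d\n", gem_busy(&flushing), gem_busy(&rendering));  /* 0 1 */
	return 0;
}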
@@ -2182,29 +2368,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
2182 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2368 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2183} 2369}
2184 2370
2185static int
2186i915_gem_set_domain(struct drm_gem_object *obj,
2187 struct drm_file *file_priv,
2188 uint32_t read_domains,
2189 uint32_t write_domain)
2190{
2191 struct drm_device *dev = obj->dev;
2192 int ret;
2193 uint32_t flush_domains;
2194
2195 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2196
2197 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2198 if (ret)
2199 return ret;
2200 flush_domains = i915_gem_dev_set_domain(obj->dev);
2201
2202 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2203 (void) i915_add_request(dev, flush_domains);
2204
2205 return 0;
2206}
2207
2208/** Unbinds all objects that are on the given buffer list. */ 2371/** Unbinds all objects that are on the given buffer list. */
2209static int 2372static int
2210i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) 2373i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
@@ -2299,29 +2462,52 @@ i915_gem_idle(struct drm_device *dev)
2299 2462
2300 i915_gem_retire_requests(dev); 2463 i915_gem_retire_requests(dev);
2301 2464
2302 /* Active and flushing should now be empty as we've 2465 if (!dev_priv->mm.wedged) {
2303 * waited for a sequence higher than any pending execbuffer 2466 /* Active and flushing should now be empty as we've
2304 */ 2467 * waited for a sequence higher than any pending execbuffer
2305 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2468 */
2306 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 2469 WARN_ON(!list_empty(&dev_priv->mm.active_list));
2470 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
2471 /* Request should now be empty as we've also waited
2472 * for the last request in the list
2473 */
2474 WARN_ON(!list_empty(&dev_priv->mm.request_list));
2475 }
2307 2476
2308 /* Request should now be empty as we've also waited 2477 /* Empty the active and flushing lists to inactive. If there's
2309 * for the last request in the list 2478 * anything left at this point, it means that we're wedged and
2479 * nothing good's going to happen by leaving them there. So strip
2480 * the GPU domains and just stuff them onto inactive.
2310 */ 2481 */
2311 BUG_ON(!list_empty(&dev_priv->mm.request_list)); 2482 while (!list_empty(&dev_priv->mm.active_list)) {
2483 struct drm_i915_gem_object *obj_priv;
2484
2485 obj_priv = list_first_entry(&dev_priv->mm.active_list,
2486 struct drm_i915_gem_object,
2487 list);
2488 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2489 i915_gem_object_move_to_inactive(obj_priv->obj);
2490 }
2491
2492 while (!list_empty(&dev_priv->mm.flushing_list)) {
2493 struct drm_i915_gem_object *obj_priv;
2312 2494
2313 /* Move all buffers out of the GTT. */ 2495 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2496 struct drm_i915_gem_object,
2497 list);
2498 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2499 i915_gem_object_move_to_inactive(obj_priv->obj);
2500 }
2501
2502
2503 /* Move all inactive buffers out of the GTT. */
2314 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); 2504 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2505 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
2315 if (ret) { 2506 if (ret) {
2316 mutex_unlock(&dev->struct_mutex); 2507 mutex_unlock(&dev->struct_mutex);
2317 return ret; 2508 return ret;
2318 } 2509 }
2319 2510
2320 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2321 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2322 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2323 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2324
2325 i915_gem_cleanup_ringbuffer(dev); 2511 i915_gem_cleanup_ringbuffer(dev);
2326 mutex_unlock(&dev->struct_mutex); 2512 mutex_unlock(&dev->struct_mutex);
2327 2513