Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 639 ++++++++++++++++++++++++++++------------
 1 file changed, 405 insertions(+), 234 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d58ddef468f8..ad672d854828 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -33,21 +33,21 @@
 
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-			   uint32_t read_domains,
-			   uint32_t write_domain);
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-				 uint64_t offset,
-				 uint64_t size,
-				 uint32_t read_domains,
-				 uint32_t write_domain);
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-		    struct drm_file *file_priv,
-		    uint32_t read_domains,
-		    uint32_t write_domain);
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  uint32_t read_domains,
+				  uint32_t write_domain);
+static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+					     int write);
+static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+					     int write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+						      uint64_t offset,
+						      uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -162,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
-					       I915_GEM_DOMAIN_CPU, 0);
+	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+							args->size);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
@@ -260,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 	if (ret)
 		goto fail;
 
@@ -320,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -397,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
- * Called when user space prepares to use an object
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
  */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -405,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_set_domain *args = data;
 	struct drm_gem_object *obj;
+	uint32_t read_domains = args->read_domains;
+	uint32_t write_domain = args->write_domain;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
+	/* Only handle setting domains to types used by the CPU. */
+	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	/* Having something in the write domain implies it's in the read
+	 * domain, and only that read domain.  Enforce that in the request.
+	 */
+	if (write_domain != 0 && read_domains != write_domain)
+		return -EINVAL;
+
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
@@ -417,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 #if WATCH_BUF
 	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
-		 obj, obj->size, args->read_domains, args->write_domain);
+		 obj, obj->size, read_domains, write_domain);
 #endif
-	ret = i915_gem_set_domain(obj, file_priv,
-				  args->read_domains, args->write_domain);
+	if (read_domains & I915_GEM_DOMAIN_GTT) {
+		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+		/* Silently promote "you're not bound, there was nothing to do"
+		 * to success, since the client was just asking us to
+		 * make sure everything was done.
+		 */
+		if (ret == -EINVAL)
+			ret = 0;
+	} else {
+		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+	}
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
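For reference, the tightened ioctl contract above can be exercised from userspace roughly as follows. This is an illustrative sketch, not part of the patch; fd and handle are assumed to be an open DRM file descriptor and a valid GEM handle, with definitions from i915_drm.h and libdrm's drmIoctl().

	/* sketch: assumes <string.h>, <xf86drm.h>, <drm/i915_drm.h> */
	static int set_gtt_domain(int fd, uint32_t handle, int for_write)
	{
		struct drm_i915_gem_set_domain sd;

		memset(&sd, 0, sizeof(sd));
		sd.handle = handle;
		sd.read_domains = I915_GEM_DOMAIN_GTT;
		/* a nonzero write domain must equal the single read domain */
		sd.write_domain = for_write ? I915_GEM_DOMAIN_GTT : 0;

		/* -EINVAL now means an unsupported (GPU-only) domain or
		 * mismatched read/write domains; "not bound yet" is silently
		 * promoted to success for the GTT case.
		 */
		return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
	}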
@@ -455,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	obj_priv = obj->driver_private;
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
-		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
-	}
+	if (obj_priv->pin_count)
+		i915_gem_object_flush_cpu_write_domain(obj);
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -532,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -546,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
 	/* Move from whatever list we were on to the tail of execution. */
 	list_move_tail(&obj_priv->list,
 		       &dev_priv->mm.active_list);
+	obj_priv->last_rendering_seqno = seqno;
 }
 
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	BUG_ON(!obj_priv->active);
+	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+	obj_priv->last_rendering_seqno = 0;
+}
 
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -562,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -610,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
-	request->flush_domains = flush_domains;
 	was_empty = list_empty(&dev_priv->mm.request_list);
 	list_add_tail(&request->list, &dev_priv->mm.request_list);
 
+	/* Associate any objects on the flushing list matching the write
+	 * domain we're flushing with our flush.
+	 */
+	if (flush_domains != 0) {
+		struct drm_i915_gem_object *obj_priv, *next;
+
+		list_for_each_entry_safe(obj_priv, next,
+					 &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			if ((obj->write_domain & flush_domains) ==
+			    obj->write_domain) {
+				obj->write_domain = 0;
+				i915_gem_object_move_to_active(obj, seqno);
+			}
+		}
+
+	}
+
 	if (was_empty && !dev_priv->mm.suspended)
 		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
 	return seqno;
@@ -676,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
 			 __func__, request->seqno, obj);
 #endif
 
-		if (obj->write_domain != 0) {
-			list_move_tail(&obj_priv->list,
-				       &dev_priv->mm.flushing_list);
-		} else {
+		if (obj->write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
 			i915_gem_object_move_to_inactive(obj);
-		}
-	}
-
-	if (request->flush_domains != 0) {
-		struct drm_i915_gem_object *obj_priv, *next;
-
-		/* Clear the write domain and activity from any buffers
-		 * that are just waiting for a flush matching the one retired.
-		 */
-		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
-			struct drm_gem_object *obj = obj_priv->obj;
-
-			if (obj->write_domain & request->flush_domains) {
-				obj->write_domain = 0;
-				i915_gem_object_move_to_inactive(obj);
-			}
-		}
-
 	}
 }
@@ -892,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int ret;
 
-	/* If there are writes queued to the buffer, flush and
-	 * create a new seqno to wait for.
+	/* This function only exists to support waiting for existing rendering,
+	 * not for emitting required flushes.
 	 */
-	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
-		uint32_t write_domain = obj->write_domain;
-#if WATCH_BUF
-		DRM_INFO("%s: flushing object %p from write domain %08x\n",
-			 __func__, obj, write_domain);
-#endif
-		i915_gem_flush(dev, 0, write_domain);
-
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = i915_add_request(dev,
-								  write_domain);
-		BUG_ON(obj_priv->last_rendering_seqno == 0);
-#if WATCH_LRU
-		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-#endif
-	}
+	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
 	/* If there is rendering queued on the buffer being evicted, wait for
 	 * it.
@@ -950,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 		return -EINVAL;
 	}
 
-	/* Wait for any rendering to complete
-	 */
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret) {
-		DRM_ERROR("wait_rendering failed: %d\n", ret);
-		return ret;
-	}
-
 	/* Move the object to the CPU domain to ensure that
 	 * any possible CPU writes while it's not in the GTT
 	 * are flushed when we go to remap it. This will
 	 * also ensure that all pending GPU writes are finished
 	 * before we unbind.
 	 */
-	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
-					 I915_GEM_DOMAIN_CPU);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret) {
-		DRM_ERROR("set_domain failed: %d\n", ret);
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("set_domain failed: %d\n", ret);
 		return ret;
 	}
@@ -1083,6 +1095,21 @@ i915_gem_evict_something(struct drm_device *dev)
 }
 
 static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+	int ret;
+
+	for (;;) {
+		ret = i915_gem_evict_something(dev);
+		if (ret != 0)
+			break;
+	}
+	if (ret == -ENOMEM)
+		return 0;
+	return ret;
+}
+
+static int
 i915_gem_object_get_page_list(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
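One subtlety in the new helper: it evicts until i915_gem_evict_something() reports -ENOMEM, and treats that value as a successfully emptied aperture rather than a failure. A hedged sketch of the intended caller pattern (pin_all() is a hypothetical placeholder for a caller's pinning pass, as in the execbuffer rework further down):

	ret = pin_all(dev);			/* first attempt: GTT is full */
	if (ret == -ENOMEM) {
		ret = i915_gem_evict_everything(dev);
		if (ret == 0)			/* aperture swept clean */
			ret = pin_all(dev);	/* exactly one retry */
	}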
@@ -1168,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
 		ret = i915_gem_evict_something(dev);
 		if (ret != 0) {
-			DRM_ERROR("Failed to evict a buffer %d\n", ret);
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to evict a buffer %d\n", ret);
 			return ret;
 		}
 		goto search_free;
@@ -1228,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
 }
 
+/** Flushes any GPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	uint32_t seqno;
+
+	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+		return;
+
+	/* Queue the GPU write cache flushing we need. */
+	i915_gem_flush(dev, 0, obj->write_domain);
+	seqno = i915_add_request(dev, obj->write_domain);
+	obj->write_domain = 0;
+	i915_gem_object_move_to_active(obj, seqno);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+{
+	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+		return;
+
+	/* No actual flushing is required for the GTT write domain.  Writes
+	 * to it immediately go to main memory as far as we know, so there's
+	 * no chipset flush.  It also doesn't land in render cache.
+	 */
+	obj->write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+		return;
+
+	i915_gem_clflush_object(obj);
+	drm_agp_chipset_flush(dev);
+	obj->write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret;
+
+	/* Not valid to be called on unbound objects. */
+	if (obj_priv->gtt_space == NULL)
+		return -EINVAL;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+
+	/* If we're writing through the GTT domain, then CPU and GPU caches
+	 * will need to be invalidated at next use.
+	 */
+	if (write)
+		obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	if (write) {
+		obj->write_domain = I915_GEM_DOMAIN_GTT;
+		obj_priv->dirty = 1;
+	}
+
+	return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_device *dev = obj->dev;
+	int ret;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	/* If we have a partially-valid cache of the object in the CPU,
+	 * finish invalidating it and free the per-page flags.
+	 */
+	i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+	/* Flush the CPU cache if it's still invalid. */
+	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+		i915_gem_clflush_object(obj);
+		drm_agp_chipset_flush(dev);
+
+		obj->read_domains |= I915_GEM_DOMAIN_CPU;
+	}
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+	/* If we're writing through the CPU, then the GPU read domains will
+	 * need to be invalidated at next use.
+	 */
+	if (write) {
+		obj->read_domains &= I915_GEM_DOMAIN_CPU;
+		obj->write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	return 0;
+}
+
 /*
  * Set the next domain for the specified object. This
  * may not actually perform the necessary flushing/invaliding though,
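The flush and set_to helpers above give every CPU-visible transition one uniform shape: flush the old write domain, wait for rendering if that flush went to the GPU, then update read_domains/write_domain. As an illustration (this sequence is not in the patch; dst and cpu_map are assumed), a CPU readback of a GPU-rendered buffer decomposes as:

	ret = i915_gem_object_set_to_cpu_domain(obj, 0);
	/* internally:
	 *   i915_gem_object_flush_gpu_write_domain(obj);   MI_FLUSH + request
	 *   i915_gem_object_wait_rendering(obj);           wait on that seqno
	 *   i915_gem_object_flush_gtt_write_domain(obj);   no-op barrier
	 *   i915_gem_object_set_to_full_cpu_read_domain(obj);
	 *   clflush + chipset flush if the CPU view was stale
	 */
	if (ret == 0)
		memcpy(dst, cpu_map, obj->size);	/* now coherent */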
@@ -1339,16 +1504,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
  *		MI_FLUSH
  *		drm_agp_chipset_flush
  */
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-			   uint32_t read_domains,
-			   uint32_t write_domain)
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  uint32_t read_domains,
+				  uint32_t write_domain)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
-	int ret;
+
+	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1385,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
 			 __func__, flush_domains, invalidate_domains);
 #endif
-		/*
-		 * If we're invaliding the CPU cache and flushing a GPU cache,
-		 * then pause for rendering so that the GPU caches will be
-		 * flushed before the cpu cache is invalidated
-		 */
-		if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
-		    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
-				       I915_GEM_DOMAIN_GTT))) {
-			ret = i915_gem_object_wait_rendering(obj);
-			if (ret)
-				return ret;
-		}
 		i915_gem_clflush_object(obj);
 	}
 
 	if ((write_domain | flush_domains) != 0)
 		obj->write_domain = write_domain;
-
-	/* If we're invalidating the CPU domain, clear the per-page CPU
-	 * domain list as well.
-	 */
-	if (obj_priv->page_cpu_valid != NULL &&
-	    (write_domain != 0 ||
-	     read_domains & I915_GEM_DOMAIN_CPU)) {
-		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-			 DRM_MEM_DRIVER);
-		obj_priv->page_cpu_valid = NULL;
-	}
 	obj->read_domains = read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
@@ -1423,47 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
 		 obj->read_domains, obj->write_domain,
 		 dev->invalidate_domains, dev->flush_domains);
 #endif
-	return 0;
 }
 
 /**
- * Set the read/write domain on a range of the object.
+ * Moves the object from a partially CPU read to a full one.
  *
- * Currently only implemented for CPU reads, otherwise drops to normal
- * i915_gem_object_set_domain().
+ * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
+ * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  */
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-				 uint64_t offset,
-				 uint64_t size,
-				 uint32_t read_domains,
-				 uint32_t write_domain)
+static void
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-	int ret, i;
 
-	if (obj->read_domains & I915_GEM_DOMAIN_CPU)
-		return 0;
+	if (!obj_priv->page_cpu_valid)
+		return;
 
-	if (read_domains != I915_GEM_DOMAIN_CPU ||
-	    write_domain != 0)
-		return i915_gem_object_set_domain(obj,
-						  read_domains, write_domain);
+	/* If we're partially in the CPU read domain, finish moving it in.
+	 */
+	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+		int i;
 
-	/* Wait on any GPU rendering to the object to be flushed. */
+		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+			if (obj_priv->page_cpu_valid[i])
+				continue;
+			drm_clflush_pages(obj_priv->page_list + i, 1);
+		}
+		drm_agp_chipset_flush(dev);
+	}
+
+	/* Free the page_cpu_valid mappings which are now stale, whether
+	 * or not we've got I915_GEM_DOMAIN_CPU.
+	 */
+	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+		 DRM_MEM_DRIVER);
+	obj_priv->page_cpu_valid = NULL;
+}
+
+/**
+ * Set the CPU read domain on a range of the object.
+ *
+ * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
+ * not entirely valid.  The page_cpu_valid member of the object flags which
+ * pages have been flushed, and will be respected by
+ * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
+ * of the whole object.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+					  uint64_t offset, uint64_t size)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int i, ret;
+
+	if (offset == 0 && size == obj->size)
+		return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
 	ret = i915_gem_object_wait_rendering(obj);
-	if (ret)
+	if (ret != 0)
 		return ret;
+	i915_gem_object_flush_gtt_write_domain(obj);
 
+	/* If we're already fully in the CPU read domain, we're done. */
+	if (obj_priv->page_cpu_valid == NULL &&
+	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+		return 0;
+
+	/* Otherwise, create/clear the per-page CPU read domain flag if we're
+	 * newly adding I915_GEM_DOMAIN_CPU
+	 */
 	if (obj_priv->page_cpu_valid == NULL) {
 		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
 						      DRM_MEM_DRIVER);
-	}
+		if (obj_priv->page_cpu_valid == NULL)
+			return -ENOMEM;
+	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
 
 	/* Flush the cache on any pages that are still invalid from the CPU's
 	 * perspective.
 	 */
-	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+	     i++) {
 		if (obj_priv->page_cpu_valid[i])
 			continue;
 
@@ -1472,39 +1663,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
 		obj_priv->page_cpu_valid[i] = 1;
 	}
 
-	return 0;
-}
-
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
-	uint32_t flush_domains = dev->flush_domains;
-
-	/*
-	 * Now that all the buffers are synced to the proper domains,
-	 * flush and invalidate the collected domains
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
 	 */
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			 __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev,
-			       dev->invalidate_domains,
-			       dev->flush_domains);
-		dev->invalidate_domains = 0;
-		dev->flush_domains = 0;
-	}
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
-	return flush_domains;
+	obj->read_domains |= I915_GEM_DOMAIN_CPU;
+
+	return 0;
 }
 
 /**
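The effect of the per-page tracking is easiest to see with a small pread-style example (illustrative offsets, not from the patch): only pages not yet marked valid get clflushed, so repeated small reads of the same range do no redundant cache maintenance.

	/* first read of page 1: clflushes it and sets page_cpu_valid[1] */
	i915_gem_object_set_cpu_read_domain_range(obj, 4096, 4096);

	/* same range again: page_cpu_valid[1] short-circuits the flush */
	i915_gem_object_set_cpu_read_domain_range(obj, 4096, 4096);

	/* whole-object access later: set_to_full_cpu_read_domain() flushes
	 * only the still-invalid pages, then frees the bitmap
	 */
	i915_gem_object_set_to_cpu_domain(obj, 0);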
@@ -1585,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			return -EINVAL;
 		}
 
+		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+			DRM_ERROR("reloc with read/write CPU domains: "
+				  "obj %p target %d offset %d "
+				  "read %08x write %08x",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset,
+				  reloc.read_domains,
+				  reloc.write_domain);
+			return -EINVAL;
+		}
+
 		if (reloc.write_domain && target_obj->pending_write_domain &&
 		    reloc.write_domain != target_obj->pending_write_domain) {
 			DRM_ERROR("Write domain conflict: "
@@ -1625,19 +1803,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			continue;
 		}
 
-		/* Now that we're going to actually write some data in,
-		 * make sure that any rendering using this buffer's contents
-		 * is completed.
-		 */
-		i915_gem_object_wait_rendering(obj);
-
-		/* As we're writing through the gtt, flush
-		 * any CPU writes before we write the relocations
-		 */
-		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-			i915_gem_clflush_object(obj);
-			drm_agp_chipset_flush(dev);
-			obj->write_domain = 0;
+		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		if (ret != 0) {
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
 		}
 
 		/* Map the page containing the relocation we're going to
@@ -1779,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	int ret, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains;
+	int pin_tries;
 
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1827,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EBUSY;
 	}
 
-	/* Zero the gloabl flush/invalidate flags. These
-	 * will be modified as each object is bound to the
-	 * gtt
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-
-	/* Look up object handles and perform the relocations */
+	/* Look up object handles */
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
 						       exec_list[i].handle);
@@ -1844,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			ret = -EBADF;
 			goto err;
 		}
+	}
 
-		object_list[i]->pending_read_domains = 0;
-		object_list[i]->pending_write_domain = 0;
-		ret = i915_gem_object_pin_and_relocate(object_list[i],
-						       file_priv,
-						       &exec_list[i]);
-		if (ret) {
-			DRM_ERROR("object bind and relocate failed %d\n", ret);
+	/* Pin and relocate */
+	for (pin_tries = 0; ; pin_tries++) {
+		ret = 0;
+		for (i = 0; i < args->buffer_count; i++) {
+			object_list[i]->pending_read_domains = 0;
+			object_list[i]->pending_write_domain = 0;
+			ret = i915_gem_object_pin_and_relocate(object_list[i],
+							       file_priv,
+							       &exec_list[i]);
+			if (ret)
+				break;
+			pinned = i + 1;
+		}
+		/* success */
+		if (ret == 0)
+			break;
+
+		/* error other than GTT full, or we've already tried again */
+		if (ret != -ENOMEM || pin_tries >= 1) {
+			DRM_ERROR("Failed to pin buffers %d\n", ret);
 			goto err;
 		}
-		pinned = i + 1;
+
+		/* unpin all of our buffers */
+		for (i = 0; i < pinned; i++)
+			i915_gem_object_unpin(object_list[i]);
+
+		/* evict everyone we can from the aperture */
+		ret = i915_gem_evict_everything(dev);
+		if (ret)
+			goto err;
 	}
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
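The control flow of the new pinning loop reduces to the skeleton below (pin_all() and unpin_all() are hypothetical condensations of the inner loops, shown only to make the retry policy explicit): one optimistic pass, then on GTT exhaustion a single global eviction and exactly one more attempt.

	for (pin_tries = 0; ; pin_tries++) {
		ret = pin_all(object_list, args->buffer_count);
		if (ret == 0)
			break;			/* everything pinned */
		if (ret != -ENOMEM || pin_tries >= 1)
			goto err;		/* hard failure or second miss */
		unpin_all(object_list, pinned);
		if (i915_gem_evict_everything(dev))
			goto err;		/* sweep aperture, retry once */
	}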
@@ -1864,21 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
 
-		/* make sure all previous memory operations have passed */
-		ret = i915_gem_object_set_domain(obj,
-						 obj->pending_read_domains,
-						 obj->pending_write_domain);
-		if (ret)
-			goto err;
+		/* Compute new gpu domains and update invalidate/flush */
+		i915_gem_object_set_to_gpu_domain(obj,
+						  obj->pending_read_domains,
+						  obj->pending_write_domain);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Flush/invalidate caches and chipset buffer */
-	flush_domains = i915_gem_dev_set_domain(dev);
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			 __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev,
+			       dev->invalidate_domains,
+			       dev->flush_domains);
+		if (dev->flush_domains)
+			(void)i915_add_request(dev, dev->flush_domains);
+	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -1898,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			      ~0);
 #endif
 
-	(void)i915_add_request(dev, flush_domains);
-
 	/* Exec the batchbuffer */
 	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
 	if (ret) {
@@ -1927,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	i915_file_priv->mm.last_gem_seqno = seqno;
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = seqno;
+		i915_gem_object_move_to_active(obj, seqno);
 #if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -2061,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	/* XXX - flush the CPU caches for pinned objects
 	 * as the X server doesn't manage domains yet
 	 */
-	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
-		obj->write_domain = 0;
-	}
+	i915_gem_object_flush_cpu_write_domain(obj);
 	args->offset = obj_priv->gtt_offset;
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -2167,29 +2361,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
 
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-		    struct drm_file *file_priv,
-		    uint32_t read_domains,
-		    uint32_t write_domain)
-{
-	struct drm_device *dev = obj->dev;
-	int ret;
-	uint32_t flush_domains;
-
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
-	if (ret)
-		return ret;
-	flush_domains = i915_gem_dev_set_domain(obj->dev);
-
-	if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
-		(void) i915_add_request(dev, flush_domains);
-
-	return 0;
-}
-
 /** Unbinds all objects that are on the given buffer list. */
 static int
 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)