author	Chris Wilson <chris@chris-wilson.co.uk>	2010-10-28 09:44:08 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-10-28 15:55:03 -0400
commit	4a684a4117abd756291969336af454e8a958802f (patch)
tree	1058cb8db13c0cad6ba99be37094fe27d0684714 /drivers/gpu/drm/i915/i915_gem.c
parent	e5281ccd2e0049e2b9e8ce82449630d25082372d (diff)
drm/i915: Kill GTT mappings when moving from GTT domain
In order to force a page-fault on a GTT mapping after we start using it from the GPU, and so enforce correct CPU/GPU synchronisation, we need to invalidate the mapping.

Pointed out by Owain G. Ainsworth.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
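The invalidation is done by i915_gem_release_mmap(), which zaps any userspace PTEs pointing at the object's fake GTT offset so that the next CPU touch takes a fresh fault. Below is a simplified sketch of that helper as it looked around this era of the driver; fault_mappable and i915_gem_info_update_mappable() appear in the hunks in this patch, while the mmap_offset field and to_intel_bo() are approximations from surrounding code of the period, not part of this change:

	/* Zap all CPU PTEs for this object's GTT mmap so the next CPU
	 * access re-enters i915_gem_fault() and resynchronises with the
	 * GPU. unmap_mapping_range() with even_cows=1 removes the PTEs
	 * from every VMA mapping this range of the fake offset space.
	 */
	void i915_gem_release_mmap(struct drm_gem_object *obj)
	{
		struct drm_device *dev = obj->dev;
		struct drm_i915_private *dev_priv = dev->dev_private;
		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

		if (dev->dev_mapping)
			unmap_mapping_range(dev->dev_mapping,
					    obj_priv->mmap_offset,
					    obj->size, 1);

		if (obj_priv->fault_mappable) {
			obj_priv->fault_mappable = false;
			i915_gem_info_update_mappable(dev_priv, obj, false);
		}
	}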
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index abe6d901f95b..d4d8f888db85 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1314,12 +1314,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
 		if (ret)
 			goto unlock;
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, write);
-		if (ret)
-			goto unlock;
 	}
 
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto unlock;
+
 	if (!obj_priv->fault_mappable) {
 		obj_priv->fault_mappable = true;
 		i915_gem_info_update_mappable(dev_priv, obj, true);
@@ -2859,6 +2859,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
 	 * to it immediately go to main memory as far as we know, so there's
 	 * no chipset flush. It also doesn't land in render cache.
 	 */
+	i915_gem_release_mmap(obj);
+
 	old_write_domain = obj->write_domain;
 	obj->write_domain = 0;
 
@@ -3183,6 +3185,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
 		i915_gem_clflush_object(obj);
 
+	/* blow away mappings if mapped through GTT */
+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
+		i915_gem_release_mmap(obj);
+
 	/* The actual obj->write_domain will be updated with
 	 * pending_write_domain after we emit the accumulated flush for all
 	 * of our domain changes in execbuffers (which clears objects'
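Taken together, the three hunks close the following window; a hypothetical timeline in comment form, summarising the patch rather than quoting it:

	/*
	 * 1. CPU faults a GTT mmap: i915_gem_fault() binds the object and
	 *    inserts a PTE, so later CPU accesses hit the aperture directly,
	 *    with no further faults and hence no synchronisation points.
	 * 2. The GPU then uses the object (execbuffer moves it into a GPU
	 *    write domain); the stale PTE would let the CPU keep reading
	 *    and writing concurrently with rendering.
	 * 3. After this patch, flushing the GTT write domain or moving the
	 *    object to a GPU domain calls i915_gem_release_mmap() and zaps
	 *    the PTEs; the next CPU access re-faults into i915_gem_fault(),
	 *    which now always calls i915_gem_object_set_to_gtt_domain() and
	 *    so waits for the GPU before the mapping is re-established.
	 */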