 drivers/gpu/drm/i915/i915_gem.c | 55 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0b236ea0996c..719a933c5756 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1133,6 +1133,52 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    bool readonly)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = obj->ring;
+	u32 seqno;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!dev_priv->mm.interruptible);
+
+	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+	if (seqno == 0)
+		return 0;
+
+	ret = i915_gem_check_wedge(dev_priv, true);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
+
+	mutex_unlock(&dev->struct_mutex);
+	ret = __wait_seqno(ring, seqno, true, NULL);
+	mutex_lock(&dev->struct_mutex);
+
+	i915_gem_retire_requests_ring(ring);
+
+	/* Manually manage the write flush as we may have not yet
+	 * retired the buffer.
+	 */
+	if (obj->last_write_seqno &&
+	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
+		obj->last_write_seqno = 0;
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	return ret;
+}
+
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -1170,6 +1216,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		goto unlock;
 	}
 
+	/* Try to flush the object off the GPU without holding the lock.
+	 * We will repeat the flush holding the lock in the normal manner
+	 * to catch cases where we are gazumped.
+	 */
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	if (ret)
+		goto unref;
+
 	if (read_domains & I915_GEM_DOMAIN_GTT) {
 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
@@ -1183,6 +1237,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 	}
 
+unref:
 	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
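
Note: the core of the patch is the lock-drop pattern in i915_gem_object_wait_rendering__nonblocking(): snapshot the seqno while holding struct_mutex, drop the mutex for the potentially long GPU wait, retake it, and revalidate before touching the object, since any other thread may have changed it in between (hence the "highly dangerous" caveat in the comment). A minimal user-space sketch of that pattern using pthreads; struct object, struct_mutex and wait_seqno() here are hypothetical stand-ins, not the driver's code:

#include <pthread.h>
#include <stdint.h>

/* Hypothetical object state, analogous to obj->last_write_seqno;
 * 0 means no outstanding GPU write. */
struct object {
	uint32_t last_write_seqno;
};

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stub for __wait_seqno(): sleep until the GPU passes seqno.
 * Crucially, it runs without struct_mutex held. */
static int wait_seqno(uint32_t seqno)
{
	(void)seqno;
	return 0;
}

/* Wrap-safe seqno comparison, mirroring i915_seqno_passed(). */
static int seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int wait_rendering_nonblocking(struct object *obj)
{
	uint32_t seqno;
	int ret;

	pthread_mutex_lock(&struct_mutex);
	seqno = obj->last_write_seqno;	/* snapshot under the lock */
	if (seqno == 0) {		/* nothing outstanding */
		pthread_mutex_unlock(&struct_mutex);
		return 0;
	}
	pthread_mutex_unlock(&struct_mutex);	/* drop it for the slow wait */

	ret = wait_seqno(seqno);

	pthread_mutex_lock(&struct_mutex);
	/* Revalidate: another thread may have queued a newer write while
	 * the lock was dropped, so only clear the tracking if the seqno
	 * we waited for still covers the last write. */
	if (obj->last_write_seqno &&
	    seqno_passed(seqno, obj->last_write_seqno))
		obj->last_write_seqno = 0;
	pthread_mutex_unlock(&struct_mutex);

	return ret;
}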
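
For context, the path this patch changes is hit whenever user space issues the set-domain ioctl before CPU access; with the optimistic unlocked flush, a client blocking on a busy buffer no longer holds struct_mutex across the wait, so other clients can keep submitting. A sketch of the triggering call, using the real uapi structure from include/uapi/drm/i915_drm.h (the fd and GEM handle are assumed to come from earlier setup; real code would typically use libdrm's drmIoctl() to restart on EINTR, which matters here since the wait is interruptible):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Move a GEM buffer to the GTT domain before CPU access through a GTT
 * mapping. With the patch above, the kernel first flushes the object
 * off the GPU without holding struct_mutex, then repeats the flush
 * under the lock to catch racing writers. */
static int set_to_gtt_domain(int fd, uint32_t handle, int for_write)
{
	struct drm_i915_gem_set_domain sd;

	memset(&sd, 0, sizeof(sd));
	sd.handle = handle;
	sd.read_domains = I915_GEM_DOMAIN_GTT;
	sd.write_domain = for_write ? I915_GEM_DOMAIN_GTT : 0;

	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}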