diff options
author | Chris Wilson <chris@chris-wilson.co.uk> | 2013-06-29 17:05:26 -0400 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2013-07-01 05:15:01 -0400 |
commit | d26e3af842023603747f1566caff5f471508bbd4 (patch) | |
tree | d8f8c73b503557952f2e423545c08abc0ee011fd /drivers/gpu/drm/i915/i915_gem.c | |
parent | daa13e1ca587bc773c1aae415ed1af6554117bd4 (diff) |
drm/i915: Refactor the wait_rendering completion into a common routine
Harmonise the completion logic between the non-blocking and normal
wait_rendering paths, and move that logic into a common function.
In the process, we note that the last_write_seqno is by definition the
earlier of the two read/write seqnos and so all successful waits will
have passed the last_write_seqno. Therefore we can unconditionally clear
the write seqno and its domains in the completion logic.
v2: Add the missing ring parameter, because sometimes it is good to have
things compile.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 48 |
1 file changed, 23 insertions, 25 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 18025fd45d40..769f75262feb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1087,6 +1087,25 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) | |||
1087 | interruptible, NULL); | 1087 | interruptible, NULL); |
1088 | } | 1088 | } |
1089 | 1089 | ||
1090 | static int | ||
1091 | i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj, | ||
1092 | struct intel_ring_buffer *ring) | ||
1093 | { | ||
1094 | i915_gem_retire_requests_ring(ring); | ||
1095 | |||
1096 | /* Manually manage the write flush as we may have not yet | ||
1097 | * retired the buffer. | ||
1098 | * | ||
1099 | * Note that the last_write_seqno is always the earlier of | ||
1100 | * the two (read/write) seqno, so if we have successfully waited, | ||
1101 | * we know we have passed the last write. | ||
1102 | */ | ||
1103 | obj->last_write_seqno = 0; | ||
1104 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
1105 | |||
1106 | return 0; | ||
1107 | } | ||
1108 | |||
1090 | /** | 1109 | /** |
1091 | * Ensures that all rendering to the object has completed and the object is | 1110 | * Ensures that all rendering to the object has completed and the object is |
1092 | * safe to unbind from the GTT or access from the CPU. | 1111 | * safe to unbind from the GTT or access from the CPU. |
@@ -1107,18 +1126,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | |||
1107 | if (ret) | 1126 | if (ret) |
1108 | return ret; | 1127 | return ret; |
1109 | 1128 | ||
1110 | i915_gem_retire_requests_ring(ring); | 1129 | return i915_gem_object_wait_rendering__tail(obj, ring); |
1111 | |||
1112 | /* Manually manage the write flush as we may have not yet | ||
1113 | * retired the buffer. | ||
1114 | */ | ||
1115 | if (obj->last_write_seqno && | ||
1116 | i915_seqno_passed(seqno, obj->last_write_seqno)) { | ||
1117 | obj->last_write_seqno = 0; | ||
1118 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
1119 | } | ||
1120 | |||
1121 | return 0; | ||
1122 | } | 1130 | } |
1123 | 1131 | ||
1124 | /* A nonblocking variant of the above wait. This is a highly dangerous routine | 1132 | /* A nonblocking variant of the above wait. This is a highly dangerous routine |
@@ -1154,20 +1162,10 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
1154 | mutex_unlock(&dev->struct_mutex); | 1162 | mutex_unlock(&dev->struct_mutex); |
1155 | ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); | 1163 | ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); |
1156 | mutex_lock(&dev->struct_mutex); | 1164 | mutex_lock(&dev->struct_mutex); |
1165 | if (ret) | ||
1166 | return ret; | ||
1157 | 1167 | ||
1158 | i915_gem_retire_requests_ring(ring); | 1168 | return i915_gem_object_wait_rendering__tail(obj, ring); |
1159 | |||
1160 | /* Manually manage the write flush as we may have not yet | ||
1161 | * retired the buffer. | ||
1162 | */ | ||
1163 | if (ret == 0 && | ||
1164 | obj->last_write_seqno && | ||
1165 | i915_seqno_passed(seqno, obj->last_write_seqno)) { | ||
1166 | obj->last_write_seqno = 0; | ||
1167 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
1168 | } | ||
1169 | |||
1170 | return ret; | ||
1171 | } | 1169 | } |
1172 | 1170 | ||
1173 | /** | 1171 | /** |