aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/i915_request.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2019-01-25 08:22:28 -0500
committerChris Wilson <chris@chris-wilson.co.uk>2019-01-25 09:27:22 -0500
commiteb8d0f5af4ec2d172baf8b4b9a2199cd916b4e54 (patch)
tree28293a5cdfd09863ce764d181c5039cce25b79a2 /drivers/gpu/drm/i915/i915_request.c
parentfe62365f9f80a1c1d438c54fba21f5108a182de8 (diff)
drm/i915: Remove GPU reset dependence on struct_mutex
Now that the submission backends are controlled via their own spinlocks, with a wave of a magic wand we can lift the struct_mutex requirement around GPU reset. That is we allow the submission frontend (userspace) to keep on submitting while we process the GPU reset as we can suspend the backend independently. The major change is around the backoff/handoff strategy for performing the reset. With no mutex deadlock, we no longer have to coordinate with any waiter, and just perform the reset immediately. Testcase: igt/gem_mmap_gtt/hang # regresses Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20190125132230.22221-3-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
-rw-r--r--drivers/gpu/drm/i915/i915_request.c47
1 file changed, 0 insertions, 47 deletions
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index ddc35e9dc0c0..f4241a17e2ad 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1083,18 +1083,6 @@ static bool __i915_spin_request(const struct i915_request *rq,
1083 return false; 1083 return false;
1084} 1084}
1085 1085
1086static bool __i915_wait_request_check_and_reset(struct i915_request *request)
1087{
1088 struct i915_gpu_error *error = &request->i915->gpu_error;
1089
1090 if (likely(!i915_reset_handoff(error)))
1091 return false;
1092
1093 __set_current_state(TASK_RUNNING);
1094 i915_reset(request->i915, error->stalled_mask, error->reason);
1095 return true;
1096}
1097
1098/** 1086/**
1099 * i915_request_wait - wait until execution of request has finished 1087 * i915_request_wait - wait until execution of request has finished
1100 * @rq: the request to wait upon 1088 * @rq: the request to wait upon
@@ -1120,17 +1108,10 @@ long i915_request_wait(struct i915_request *rq,
1120{ 1108{
1121 const int state = flags & I915_WAIT_INTERRUPTIBLE ? 1109 const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1122 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; 1110 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1123 wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
1124 DEFINE_WAIT_FUNC(reset, default_wake_function);
1125 DEFINE_WAIT_FUNC(exec, default_wake_function); 1111 DEFINE_WAIT_FUNC(exec, default_wake_function);
1126 struct intel_wait wait; 1112 struct intel_wait wait;
1127 1113
1128 might_sleep(); 1114 might_sleep();
1129#if IS_ENABLED(CONFIG_LOCKDEP)
1130 GEM_BUG_ON(debug_locks &&
1131 !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
1132 !!(flags & I915_WAIT_LOCKED));
1133#endif
1134 GEM_BUG_ON(timeout < 0); 1115 GEM_BUG_ON(timeout < 0);
1135 1116
1136 if (i915_request_completed(rq)) 1117 if (i915_request_completed(rq))
@@ -1140,11 +1121,7 @@ long i915_request_wait(struct i915_request *rq,
1140 return -ETIME; 1121 return -ETIME;
1141 1122
1142 trace_i915_request_wait_begin(rq, flags); 1123 trace_i915_request_wait_begin(rq, flags);
1143
1144 add_wait_queue(&rq->execute, &exec); 1124 add_wait_queue(&rq->execute, &exec);
1145 if (flags & I915_WAIT_LOCKED)
1146 add_wait_queue(errq, &reset);
1147
1148 intel_wait_init(&wait); 1125 intel_wait_init(&wait);
1149 if (flags & I915_WAIT_PRIORITY) 1126 if (flags & I915_WAIT_PRIORITY)
1150 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); 1127 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
@@ -1155,10 +1132,6 @@ restart:
1155 if (intel_wait_update_request(&wait, rq)) 1132 if (intel_wait_update_request(&wait, rq))
1156 break; 1133 break;
1157 1134
1158 if (flags & I915_WAIT_LOCKED &&
1159 __i915_wait_request_check_and_reset(rq))
1160 continue;
1161
1162 if (signal_pending_state(state, current)) { 1135 if (signal_pending_state(state, current)) {
1163 timeout = -ERESTARTSYS; 1136 timeout = -ERESTARTSYS;
1164 goto complete; 1137 goto complete;
@@ -1188,9 +1161,6 @@ restart:
1188 */ 1161 */
1189 goto wakeup; 1162 goto wakeup;
1190 1163
1191 if (flags & I915_WAIT_LOCKED)
1192 __i915_wait_request_check_and_reset(rq);
1193
1194 for (;;) { 1164 for (;;) {
1195 if (signal_pending_state(state, current)) { 1165 if (signal_pending_state(state, current)) {
1196 timeout = -ERESTARTSYS; 1166 timeout = -ERESTARTSYS;
@@ -1214,21 +1184,6 @@ wakeup:
1214 if (i915_request_completed(rq)) 1184 if (i915_request_completed(rq))
1215 break; 1185 break;
1216 1186
1217 /*
1218 * If the GPU is hung, and we hold the lock, reset the GPU
1219 * and then check for completion. On a full reset, the engine's
1220 * HW seqno will be advanced past us and we are complete.
1221 * If we do a partial reset, we have to wait for the GPU to
1222 * resume and update the breadcrumb.
1223 *
1224 * If we don't hold the mutex, we can just wait for the worker
1225 * to come along and update the breadcrumb (either directly
1226 * itself, or indirectly by recovering the GPU).
1227 */
1228 if (flags & I915_WAIT_LOCKED &&
1229 __i915_wait_request_check_and_reset(rq))
1230 continue;
1231
1232 /* Only spin if we know the GPU is processing this request */ 1187 /* Only spin if we know the GPU is processing this request */
1233 if (__i915_spin_request(rq, wait.seqno, state, 2)) 1188 if (__i915_spin_request(rq, wait.seqno, state, 2))
1234 break; 1189 break;
@@ -1242,8 +1197,6 @@ wakeup:
1242 intel_engine_remove_wait(rq->engine, &wait); 1197 intel_engine_remove_wait(rq->engine, &wait);
1243complete: 1198complete:
1244 __set_current_state(TASK_RUNNING); 1199 __set_current_state(TASK_RUNNING);
1245 if (flags & I915_WAIT_LOCKED)
1246 remove_wait_queue(errq, &reset);
1247 remove_wait_queue(&rq->execute, &exec); 1200 remove_wait_queue(&rq->execute, &exec);
1248 trace_i915_request_wait_end(rq); 1201 trace_i915_request_wait_end(rq);
1249 1202