Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
 drivers/gpu/drm/i915/i915_request.c | 42 +++++++++++++-----------------------------
 1 file changed, 13 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f6c78c0fa74b..c88e538b2ef4 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -502,15 +502,6 @@ void __i915_request_unsubmit(struct i915_request *request)
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
-	/*
-	 * As we do not allow WAIT to preempt inflight requests,
-	 * once we have executed a request, along with triggering
-	 * any execution callbacks, we must preserve its ordering
-	 * within the non-preemptible FIFO.
-	 */
-	BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
-	request->sched.attr.priority |= __NO_PREEMPTION;
-
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 		i915_request_cancel_breadcrumb(request);
 
@@ -582,18 +573,7 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 
 	switch (state) {
 	case FENCE_COMPLETE:
-		/*
-		 * We only check a small portion of our dependencies
-		 * and so cannot guarantee that there remains no
-		 * semaphore chain across all. Instead of opting
-		 * for the full NOSEMAPHORE boost, we go for the
-		 * smaller (but still preempting) boost of
-		 * NEWCLIENT. This will be enough to boost over
-		 * a busywaiting request (as that cannot be
-		 * NEWCLIENT) without accidentally boosting
-		 * a busywait over real work elsewhere.
-		 */
-		i915_schedule_bump_priority(request, I915_PRIORITY_NEWCLIENT);
+		i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
 		break;
 
 	case FENCE_FREE:
@@ -874,12 +854,6 @@ emit_semaphore_wait(struct i915_request *to,
 	if (err < 0)
 		return err;
 
-	err = i915_sw_fence_await_dma_fence(&to->semaphore,
-					    &from->fence, 0,
-					    I915_FENCE_GFP);
-	if (err < 0)
-		return err;
-
 	/* We need to pin the signaler's HWSP until we are finished reading. */
 	err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
 	if (err)
@@ -945,8 +919,18 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 						    &from->fence, 0,
 						    I915_FENCE_GFP);
 	}
+	if (ret < 0)
+		return ret;
 
-	return ret < 0 ? ret : 0;
+	if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
+		ret = i915_sw_fence_await_dma_fence(&to->semaphore,
+						    &from->fence, 0,
+						    I915_FENCE_GFP);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
 }
 
 int
@@ -1237,7 +1221,7 @@ void i915_request_add(struct i915_request *request)
 	 * the bulk clients. (FQ_CODEL)
 	 */
 	if (list_empty(&request->sched.signalers_list))
-		attr.priority |= I915_PRIORITY_NEWCLIENT;
+		attr.priority |= I915_PRIORITY_WAIT;
 
 	engine->schedule(request, &attr);
 }