-rw-r--r--  drivers/gpu/drm/i915/i915_active.c                  36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                      3
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c                27
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c             18
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c                     14
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c    69
6 files changed, 94 insertions, 73 deletions
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 215b6ff8aa73..db7bb5bd5add 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -163,17 +163,25 @@ int i915_active_ref(struct i915_active *ref,
 		    struct i915_request *rq)
 {
 	struct i915_active_request *active;
+	int err = 0;
+
+	/* Prevent reaping in case we malloc/wait while building the tree */
+	i915_active_acquire(ref);
 
 	active = active_instance(ref, timeline);
-	if (IS_ERR(active))
-		return PTR_ERR(active);
+	if (IS_ERR(active)) {
+		err = PTR_ERR(active);
+		goto out;
+	}
 
 	if (!i915_active_request_isset(active))
 		ref->count++;
 	__i915_active_request_set(active, rq);
 
 	GEM_BUG_ON(!ref->count);
-	return 0;
+out:
+	i915_active_release(ref);
+	return err;
 }
 
 bool i915_active_acquire(struct i915_active *ref)
@@ -223,19 +231,25 @@ int i915_request_await_active_request(struct i915_request *rq,
 int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 {
 	struct active_node *it, *n;
-	int ret;
+	int err = 0;
 
-	ret = i915_request_await_active_request(rq, &ref->last);
-	if (ret)
-		return ret;
+	/* await allocates and so we need to avoid hitting the shrinker */
+	if (i915_active_acquire(ref))
+		goto out; /* was idle */
+
+	err = i915_request_await_active_request(rq, &ref->last);
+	if (err)
+		goto out;
 
 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-		ret = i915_request_await_active_request(rq, &it->base);
-		if (ret)
-			return ret;
+		err = i915_request_await_active_request(rq, &it->base);
+		if (err)
+			goto out;
 	}
 
-	return 0;
+out:
+	i915_active_release(ref);
+	return err;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6728ea5c71d4..30d516e975c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1688,7 +1688,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp,
 	if (vma->vm_file != filp)
 		return false;
 
-	return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+	return vma->vm_start == addr &&
+	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d01683167c77..8bc042551692 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -223,8 +223,14 @@ out:
 	return &p->requests[idx];
 }
 
+struct sched_cache {
+	struct list_head *priolist;
+};
+
 static struct intel_engine_cs *
-sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+sched_lock_engine(const struct i915_sched_node *node,
+		  struct intel_engine_cs *locked,
+		  struct sched_cache *cache)
 {
 	struct intel_engine_cs *engine = node_to_request(node)->engine;
 
@@ -232,6 +238,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 
 	if (engine != locked) {
 		spin_unlock(&locked->timeline.lock);
+		memset(cache, 0, sizeof(*cache));
 		spin_lock(&engine->timeline.lock);
 	}
 
@@ -253,11 +260,11 @@ static bool inflight(const struct i915_request *rq,
 static void __i915_schedule(struct i915_request *rq,
 			    const struct i915_sched_attr *attr)
 {
-	struct list_head *uninitialized_var(pl);
-	struct intel_engine_cs *engine, *last;
+	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
 	const int prio = attr->priority;
+	struct sched_cache cache;
 	LIST_HEAD(dfs);
 
 	/* Needed in order to use the temporary link inside i915_dependency */
@@ -328,7 +335,7 @@ static void __i915_schedule(struct i915_request *rq,
 		__list_del_entry(&stack.dfs_link);
 	}
 
-	last = NULL;
+	memset(&cache, 0, sizeof(cache));
 	engine = rq->engine;
 	spin_lock_irq(&engine->timeline.lock);
 
@@ -338,7 +345,7 @@ static void __i915_schedule(struct i915_request *rq,
 
 		INIT_LIST_HEAD(&dep->dfs_link);
 
-		engine = sched_lock_engine(node, engine);
+		engine = sched_lock_engine(node, engine, &cache);
 		lockdep_assert_held(&engine->timeline.lock);
 
 		/* Recheck after acquiring the engine->timeline.lock */
@@ -347,11 +354,11 @@ static void __i915_schedule(struct i915_request *rq,
 
 		node->attr.priority = prio;
 		if (!list_empty(&node->link)) {
-			if (last != engine) {
-				pl = i915_sched_lookup_priolist(engine, prio);
-				last = engine;
-			}
-			list_move_tail(&node->link, pl);
+			if (!cache.priolist)
+				cache.priolist =
+					i915_sched_lookup_priolist(engine,
+								   prio);
+			list_move_tail(&node->link, cache.priolist);
 		} else {
 			/*
 			 * If the request is not in the priolist queue because
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index cacaa1d04d17..09ed90c0ba00 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -106,16 +106,6 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 
 			GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
 					     &rq->fence.flags));
-			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
-			/*
-			 * We may race with direct invocation of
-			 * dma_fence_signal(), e.g. i915_request_retire(),
-			 * in which case we can skip processing it ourselves.
-			 */
-			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-				     &rq->fence.flags))
-				continue;
 
 			/*
 			 * Queue for execution after dropping the signaling
@@ -123,6 +113,14 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 			 * more signalers to the same context or engine.
 			 */
 			i915_request_get(rq);
+
+			/*
+			 * We may race with direct invocation of
+			 * dma_fence_signal(), e.g. i915_request_retire(),
+			 * so we need to acquire our reference to the request
+			 * before we cancel the breadcrumb.
+			 */
+			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 			list_add_tail(&rq->signal_link, &signal);
 		}
 
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ca705546a0ab..14d580cdefd3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3568,6 +3568,13 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
 {
 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
 		intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+
+	if (conn_state->content_protection ==
+	    DRM_MODE_CONTENT_PROTECTION_DESIRED)
+		intel_hdcp_enable(to_intel_connector(conn_state->connector));
+	else if (conn_state->content_protection ==
+		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+		intel_hdcp_disable(to_intel_connector(conn_state->connector));
 }
 
 static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
@@ -3962,12 +3969,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
 		goto out;
 
 	ret = drm_atomic_commit(state);
-	if (ret)
-		goto out;
-
-	return 0;
-
-out:
+out:
 	drm_atomic_state_put(state);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index d00d0bb07784..7eb58a9d1319 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -710,47 +710,45 @@ __sseu_prepare(struct drm_i915_private *i915,
 	       unsigned int flags,
 	       struct i915_gem_context *ctx,
 	       struct intel_engine_cs *engine,
-	       struct igt_spinner **spin_out)
+	       struct igt_spinner **spin)
 {
-	int ret = 0;
-
-	if (flags & (TEST_BUSY | TEST_RESET)) {
-		struct igt_spinner *spin;
-		struct i915_request *rq;
+	struct i915_request *rq;
+	int ret;
 
-		spin = kzalloc(sizeof(*spin), GFP_KERNEL);
-		if (!spin) {
-			ret = -ENOMEM;
-			goto out;
-		}
+	*spin = NULL;
+	if (!(flags & (TEST_BUSY | TEST_RESET)))
+		return 0;
 
-		ret = igt_spinner_init(spin, i915);
-		if (ret)
-			return ret;
+	*spin = kzalloc(sizeof(**spin), GFP_KERNEL);
+	if (!*spin)
+		return -ENOMEM;
 
-		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
-		if (IS_ERR(rq)) {
-			ret = PTR_ERR(rq);
-			igt_spinner_fini(spin);
-			kfree(spin);
-			goto out;
-		}
+	ret = igt_spinner_init(*spin, i915);
+	if (ret)
+		goto err_free;
 
-		i915_request_add(rq);
+	rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP);
+	if (IS_ERR(rq)) {
+		ret = PTR_ERR(rq);
+		goto err_fini;
+	}
 
-		if (!igt_wait_for_spinner(spin, rq)) {
-			pr_err("%s: Spinner failed to start!\n", name);
-			igt_spinner_end(spin);
-			igt_spinner_fini(spin);
-			kfree(spin);
-			ret = -ETIMEDOUT;
-			goto out;
-		}
+	i915_request_add(rq);
 
-		*spin_out = spin;
+	if (!igt_wait_for_spinner(*spin, rq)) {
+		pr_err("%s: Spinner failed to start!\n", name);
+		ret = -ETIMEDOUT;
+		goto err_end;
 	}
 
-out:
+	return 0;
+
+err_end:
+	igt_spinner_end(*spin);
+err_fini:
+	igt_spinner_fini(*spin);
+err_free:
+	kfree(fetch_and_zero(spin));
 	return ret;
 }
 
756 754
@@ -897,22 +895,23 @@ __sseu_test(struct drm_i915_private *i915,
 
 	ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin);
 	if (ret)
-		goto out;
+		goto out_context;
 
 	ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
 	if (ret)
-		goto out;
+		goto out_spin;
 
 	ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj,
 			    hweight32(sseu.slice_mask), spin);
 
-out:
+out_spin:
 	if (spin) {
 		igt_spinner_end(spin);
 		igt_spinner_fini(spin);
 		kfree(spin);
 	}
 
+out_context:
 	kernel_context_close(kctx);
 
 	return ret;