Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 28 +++++++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 89834ce19acd..db9688d14912 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2506,7 +2506,9 @@ static bool i915_sg_trim(struct sg_table *orig_st)
 	new_sg = new_st.sgl;
 	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
 		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
-		/* called before being DMA mapped, no need to copy sg->dma_* */
+		sg_dma_address(new_sg) = sg_dma_address(sg);
+		sg_dma_len(new_sg) = sg_dma_len(sg);
+
 		new_sg = sg_next(new_sg);
 	}
 	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
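
Note: i915_sg_trim() rebuilds the scatterlist into an exact-size table; the hunk above drops the old "no need to copy sg->dma_*" assumption and copies the sg_dma_address()/sg_dma_len() fields as well, so the trim stays correct even if the table has already been DMA mapped. A minimal userspace sketch of that copy-while-trimming pattern follows; the struct and function names are illustrative only and are not the kernel's scatterlist API:

    /*
     * Sketch: when compacting a segment table that may already be DMA
     * mapped, carry the device-side (dma_*) view over along with the
     * CPU-side page/length pair. Illustrative types, not <linux/scatterlist.h>.
     */
    #include <stdlib.h>

    struct seg {
            void *page;                     /* CPU-side view of the segment */
            unsigned int length;
            unsigned long long dma_address; /* device-side view */
            unsigned int dma_len;
    };

    /* Shrink an over-allocated segment array to exactly nents entries. */
    static struct seg *seg_trim(const struct seg *orig, unsigned int nents)
    {
            struct seg *new_tbl = calloc(nents, sizeof(*new_tbl));
            unsigned int i;

            if (!new_tbl)
                    return NULL;

            for (i = 0; i < nents; i++) {
                    new_tbl[i].page = orig[i].page;
                    new_tbl[i].length = orig[i].length;
                    /* Preserve the DMA view in case the table was mapped. */
                    new_tbl[i].dma_address = orig[i].dma_address;
                    new_tbl[i].dma_len = orig[i].dma_len;
            }

            return new_tbl;
    }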
@@ -3438,6 +3440,9 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	i915_retire_requests(i915);
 	GEM_BUG_ON(i915->gt.active_requests);
 
+	if (!intel_gpu_reset(i915, ALL_ENGINES))
+		intel_engines_sanitize(i915);
+
 	/*
 	 * Undo nop_submit_request. We prevent all new i915 requests from
 	 * being queued (by disallowing execbuf whilst wedged) so having
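
Note: on unwedging, the hunk above attempts a full-device reset and, given intel_gpu_reset()'s 0-on-success return convention, scrubs every engine back to a known state once that reset lands, so no stale submission state survives into the revived GPU. A compilable stand-alone sketch of that reset-then-sanitize ordering, using stand-in names for the i915 helpers:

    #include <stddef.h>

    struct engine { int id; };

    /* Stand-in for intel_gpu_reset(): returns 0 on success. */
    static int device_reset(void)
    {
            return 0;
    }

    /* Stand-in for intel_engines_sanitize(): scrub one engine. */
    static void engine_sanitize(struct engine *e)
    {
            (void)e;
    }

    static void unwedge(struct engine **engines, size_t count)
    {
            size_t i;

            /* Scrub per-engine state only once the full reset has landed. */
            if (device_reset() == 0)
                    for (i = 0; i < count; i++)
                            engine_sanitize(engines[i]);
    }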
@@ -5414,8 +5419,19 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 
 	assert_kernel_context_is_current(i915);
 
+	/*
+	 * Immediately park the GPU so that we enable powersaving and
+	 * treat it as idle. The next time we issue a request, we will
+	 * unpark and start using the engine->pinned_default_state, otherwise
+	 * it is in limbo and an early reset may fail.
+	 */
+	__i915_gem_park(i915);
+
 	for_each_engine(engine, i915, id) {
 		struct i915_vma *state;
+		void *vaddr;
+
+		GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
 
 		state = to_intel_context(ctx, engine)->state;
 		if (!state)
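
Note: per the new comment, the device is parked immediately after the default context image is recorded, enabling powersaving while idle, with a lazy unpark on the next request. A toy model of that park-early/unpark-lazily life cycle, with illustrative names only:

    #include <stdbool.h>

    struct gpu {
            bool parked;
            const void *default_state; /* image new requests start from */
    };

    static void gpu_park(struct gpu *g)
    {
            g->parked = true;  /* idle: powersaving may engage */
    }

    static void gpu_submit(struct gpu *g, const void *request)
    {
            if (g->parked) {
                    /* Unpark and resume from the recorded default state. */
                    g->parked = false;
            }
            (void)request;     /* queue to hardware in the real driver */
    }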
@@ -5438,6 +5454,16 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto err_active;
 
 		engine->default_state = i915_gem_object_get(state->obj);
+
+		/* Check we can acquire the image of the context state */
+		vaddr = i915_gem_object_pin_map(engine->default_state,
+						I915_MAP_FORCE_WB);
+		if (IS_ERR(vaddr)) {
+			err = PTR_ERR(vaddr);
+			goto err_active;
+		}
+
+		i915_gem_object_unpin_map(engine->default_state);
 	}
 
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
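
Note: the final hunk probes that the saved default_state image can actually be mapped (with I915_MAP_FORCE_WB) and releases the mapping straight away, so a mapping failure is reported during setup via err_active rather than at first use. A userspace analogue of that probe-and-release pattern, using mmap()/munmap() purely as stand-ins for i915_gem_object_pin_map()/i915_gem_object_unpin_map():

    #include <stdio.h>
    #include <sys/mman.h>

    static int probe_image_mapping(size_t size)
    {
            void *vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (vaddr == MAP_FAILED) {
                    perror("probe mapping");
                    return -1;  /* propagate, as the driver's goto err_active does */
            }

            munmap(vaddr, size); /* only checking that we *can* map it */
            return 0;
    }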