Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 111
 1 file changed, 65 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c55b1f75c980..d36a9755ad91 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3309,16 +3309,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct i915_request *request)
 {
-	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
-		  request->engine->name,
-		  request->fence.context, request->fence.seqno);
-	dma_fence_set_error(&request->fence, -EIO);
-
-	i915_request_submit(request);
-}
-
-static void nop_complete_submit_request(struct i915_request *request)
-{
 	unsigned long flags;
 
 	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
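
The two wedging stubs are folded into one. Reconstructed from the surviving context lines and the per-engine loop deleted in the next hunk (the authoritative body is in the new i915_gem.c, so treat this as a sketch), the consolidated stub now flags the fence with -EIO and completes it in the same critical section:

```c
static void nop_submit_request(struct i915_request *request)
{
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	/* Submit and advance the global seqno under the timeline lock, so
	 * a request can never be submitted without also being completed. */
	spin_lock_irqsave(&request->engine->timeline.lock, flags);
	__i915_request_submit(request);
	intel_engine_init_global_seqno(request->engine, request->global_seqno);
	spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
}
```
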
@@ -3354,57 +3344,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * rolling the global seqno forward (since this would complete requests
 	 * for which we haven't set the fence error to EIO yet).
 	 */
-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, i915, id)
 		i915_gem_reset_prepare_engine(engine);
 
-		engine->submit_request = nop_submit_request;
-		engine->schedule = NULL;
-	}
-	i915->caps.scheduler = 0;
-
 	/* Even if the GPU reset fails, it should still stop the engines */
 	if (INTEL_GEN(i915) >= 5)
 		intel_gpu_reset(i915, ALL_ENGINES);
 
-	/*
-	 * Make sure no one is running the old callback before we proceed with
-	 * cancelling requests and resetting the completion tracking. Otherwise
-	 * we might submit a request to the hardware which never completes.
-	 */
-	synchronize_rcu();
-
 	for_each_engine(engine, i915, id) {
-		/* Mark all executing requests as skipped */
-		engine->cancel_requests(engine);
-
-		/*
-		 * Only once we've force-cancelled all in-flight requests can we
-		 * start to complete all requests.
-		 */
-		engine->submit_request = nop_complete_submit_request;
+		engine->submit_request = nop_submit_request;
+		engine->schedule = NULL;
 	}
+	i915->caps.scheduler = 0;
 
 	/*
 	 * Make sure no request can slip through without getting completed by
 	 * either this call here to intel_engine_init_global_seqno, or the one
-	 * in nop_complete_submit_request.
+	 * in nop_submit_request.
 	 */
 	synchronize_rcu();
 
-	for_each_engine(engine, i915, id) {
-		unsigned long flags;
-
-		/*
-		 * Mark all pending requests as complete so that any concurrent
-		 * (lockless) lookup doesn't try and wait upon the request as we
-		 * reset it.
-		 */
-		spin_lock_irqsave(&engine->timeline.lock, flags);
-		intel_engine_init_global_seqno(engine,
-					       intel_engine_last_submit(engine));
-		spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	/* Mark all executing requests as skipped */
+	for_each_engine(engine, i915, id)
+		engine->cancel_requests(engine);
 
+	for_each_engine(engine, i915, id) {
 		i915_gem_reset_finish_engine(engine);
+		intel_engine_wakeup(engine);
 	}
 
 out:
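
With a single stub there is only one callback flip left, so only one synchronize_rcu() is needed: publish nop_submit_request on every engine, wait one grace period so no CPU can still be executing the old submit path, and only then cancel the in-flight requests. The same publish-then-wait pattern can be modeled in userspace with liburcu; everything below is invented for illustration, not i915 code, and assumes liburcu's default flavor (build with `cc rcu_flip.c -lurcu -lpthread`):

```c
#include <urcu.h>
#include <pthread.h>
#include <stdio.h>

static void real_submit(int req) { printf("submit %d to hw\n", req); }
static void nop_submit(int req)  { printf("complete %d with -EIO\n", req); }

/* The callback slot that i915 models with engine->submit_request. */
static void (*submit_request)(int) = real_submit;

static void *submitter(void *arg)
{
	rcu_register_thread();
	for (int i = 0; i < 100; i++) {
		rcu_read_lock();
		/* Whatever we load here stays valid until rcu_read_unlock(). */
		void (*fn)(int) = rcu_dereference(submit_request);
		fn(i);
		rcu_read_unlock();
	}
	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	pthread_t t;

	rcu_register_thread();
	pthread_create(&t, NULL, submitter, NULL);

	rcu_assign_pointer(submit_request, nop_submit);	/* publish the nop */
	synchronize_rcu();	/* no reader can still be inside real_submit */
	/* Only now is it safe to cancel what real_submit handed to "hw". */

	pthread_join(t, NULL);
	rcu_unregister_thread();
	return 0;
}
```
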
@@ -5334,7 +5300,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
 		   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
-	intel_gt_workarounds_apply(dev_priv);
+	/* Apply the GT workarounds... */
+	intel_gt_apply_workarounds(dev_priv);
+	/* ...and determine whether they are sticking. */
+	intel_gt_verify_workarounds(dev_priv, "init");
 
 	i915_gem_init_swizzling(dev_priv);
 
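
The apply call is split into an apply step plus a verify step that re-reads each workaround register and reports any value that did not stick (the "init" string tags where the check ran). The general write-then-readback shape looks roughly like the sketch below; all names are invented, and the real list machinery lives in intel_workarounds.c:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct wa {
	uint32_t reg;	/* register offset */
	uint32_t mask;	/* bits owned by this workaround */
	uint32_t val;	/* required value under mask */
};

/* Stand-ins for the MMIO accessors. */
uint32_t mmio_read(uint32_t reg);
void mmio_write(uint32_t reg, uint32_t val);

static void wa_apply(const struct wa *list, size_t count)
{
	for (size_t i = 0; i < count; i++)
		mmio_write(list[i].reg,
			   (mmio_read(list[i].reg) & ~list[i].mask) |
			   list[i].val);
}

/* Re-read every register and report anything that did not stick. */
static bool wa_verify(const struct wa *list, size_t count, const char *when)
{
	bool ok = true;

	for (size_t i = 0; i < count; i++) {
		uint32_t cur = mmio_read(list[i].reg) & list[i].mask;

		if (cur != list[i].val) {
			fprintf(stderr, "%s: reg %#x: %#x != %#x\n",
				when, list[i].reg, cur, list[i].val);
			ok = false;
		}
	}
	return ok;
}
```
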
@@ -5529,6 +5498,44 @@ err_active:
 	goto out_ctx;
 }
 
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int ret;
+
+	obj = i915_gem_object_create_stolen(i915, size);
+	if (!obj)
+		obj = i915_gem_object_create_internal(i915, size);
+	if (IS_ERR(obj)) {
+		DRM_ERROR("Failed to allocate scratch page\n");
+		return PTR_ERR(obj);
+	}
+
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
+	}
+
+	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (ret)
+		goto err_unref;
+
+	i915->gt.scratch = vma;
+	return 0;
+
+err_unref:
+	i915_gem_object_put(obj);
+	return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
 	int ret;
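
Note the two failure conventions mixed in i915_gem_init_scratch(): i915_gem_object_create_stolen() returns NULL on failure, which triggers the fallback, while i915_gem_object_create_internal() returns an ERR_PTR, which the IS_ERR()/PTR_ERR() pair decodes. A minimal userspace model of the kernel's error-pointer convention (from include/linux/err.h; the helper names below are invented):

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline bool IS_ERR(const void *ptr)
{
	/* The top 4095 addresses are reserved to encode -errno values. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Fast path: NULL on failure, like i915_gem_object_create_stolen(). */
static void *create_fast(void) { return NULL; }
/* Fallback: ERR_PTR on failure, like i915_gem_object_create_internal(). */
static void *create_fallback(void) { return ERR_PTR(-ENOMEM); }

int main(void)
{
	void *obj = create_fast();

	if (!obj)
		obj = create_fallback();
	if (IS_ERR(obj)) {
		printf("allocation failed: %ld\n", PTR_ERR(obj));
		return 1;
	}
	return 0;
}
```
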
@@ -5575,12 +5582,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_unlock;
 	}
 
-	ret = i915_gem_contexts_init(dev_priv);
+	ret = i915_gem_init_scratch(dev_priv,
+				    IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_ggtt;
 	}
 
+	ret = i915_gem_contexts_init(dev_priv);
+	if (ret) {
+		GEM_BUG_ON(ret == -EIO);
+		goto err_scratch;
+	}
+
 	ret = intel_engines_init(dev_priv);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
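
The scratch allocation slots into the usual kernel idiom: acquire resources in order (ggtt, then scratch, then contexts, then engines) and unwind in reverse via a ladder of error labels, with each new acquisition jumping to the label that releases everything acquired before it. In miniature, with invented stand-ins for the i915 steps:

```c
#include <stdio.h>

static int  ggtt_init(void)     { return 0; }
static void ggtt_fini(void)     { }
static int  scratch_init(void)  { return 0; }
static void scratch_fini(void)  { }
static int  contexts_init(void) { return -1; } /* simulate a failure */

static int init_all(void)
{
	int ret;

	ret = ggtt_init();
	if (ret)
		goto err;

	ret = scratch_init();
	if (ret)
		goto err_ggtt;

	ret = contexts_init();
	if (ret)
		goto err_scratch;	/* unwinds scratch, then ggtt */

	return 0;

err_scratch:
	scratch_fini();
err_ggtt:
	ggtt_fini();
err:
	return ret;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
```
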
@@ -5653,6 +5667,8 @@ err_pm:
 err_context:
 	if (ret != -EIO)
 		i915_gem_contexts_fini(dev_priv);
+err_scratch:
+	i915_gem_fini_scratch(dev_priv);
 err_ggtt:
 err_unlock:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5704,8 +5720,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 	intel_uc_fini(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_contexts_fini(dev_priv);
+	i915_gem_fini_scratch(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
+	intel_wa_list_free(&dev_priv->gt_wa_list);
+
 	intel_cleanup_gt_powersave(dev_priv);
 
 	intel_uc_fini_misc(dev_priv);
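
Teardown mirrors init: the scratch vma is released alongside the contexts under struct_mutex, while the new workaround list is freed only after the lock is dropped (presumably because gt_wa_list is touched only during init/fini and needs no lock; that is an inference from this hunk, not something the diff states). Helpers like i915_gem_fini_scratch() are also safe on error paths that never allocated anything, because i915_vma_unpin_and_release() fetches the caller's pointer and clears it before releasing. A generic sketch of that take-and-clear idiom, not the i915 function itself:

```c
#include <stddef.h>

struct resource;			/* opaque stand-in */
void resource_release(struct resource *res);

static void release_and_clear(struct resource **slot)
{
	struct resource *res = *slot;

	if (!res)
		return;		/* idempotent: nothing was allocated */

	*slot = NULL;		/* clear before releasing: no dangling use */
	resource_release(res);
}
```
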