aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/i915_gem.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2014-04-09 04:19:42 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2014-05-05 03:08:38 -0400
commit6099032045d4d83bf643b5fe33caaa8e56e7f5de (patch)
tree6b5853a35e76e63127b8cbfed73c2e0e53dfb1c8 /drivers/gpu/drm/i915/i915_gem.c
parente3efda49e736b8b0de3a5adb45e412cf90fdaf8d (diff)
drm/i915: Allow the module to load even if we fail to setup rings
Even without enabling the ringbuffers to allow command execution, we can
still control the display engines to enable modesetting. So make the
ringbuffer initialization failure soft, and mark the GPU as wedged
instead.

v2: Only treat an EIO from ring initialisation as a soft failure, and
abort module load for any other failure, such as allocation failures.

v3: Add an *ERROR* prior to declaring the GPU wedged so that it stands
out like a sore thumb in the logs

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 25 ++++++++++++-------------
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f766d5f94d93..89dbb1bb43e2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4452,15 +4452,11 @@ i915_gem_init_hw(struct drm_device *dev)
 	 * the do_switch), but before enabling PPGTT. So don't move this.
 	 */
 	ret = i915_gem_context_enable(dev_priv);
-	if (ret) {
+	if (ret && ret != -EIO) {
 		DRM_ERROR("Context enable failed %d\n", ret);
-		goto err_out;
+		i915_gem_cleanup_ringbuffer(dev);
 	}
 
-	return 0;
-
-err_out:
-	i915_gem_cleanup_ringbuffer(dev);
 	return ret;
 }
 
@@ -4487,18 +4483,21 @@ int i915_gem_init(struct drm_device *dev)
 	}
 
 	ret = i915_gem_init_hw(dev);
-	mutex_unlock(&dev->struct_mutex);
-	if (ret) {
-		WARN_ON(dev_priv->mm.aliasing_ppgtt);
-		i915_gem_context_fini(dev);
-		drm_mm_takedown(&dev_priv->gtt.base.mm);
-		return ret;
+	if (ret == -EIO) {
+		/* Allow ring initialisation to fail by marking the GPU as
+		 * wedged. But we only want to do this where the GPU is angry,
+		 * for all other failure, such as an allocation failure, bail.
+		 */
+		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		ret = 0;
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		dev_priv->dri1.allow_batchbuffer = 1;
-	return 0;
+	return ret;
 }
 
 void