Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_context.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.c	39
1 file changed, 10 insertions, 29 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f3e84c44d009..48afa777e94a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev)
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-	struct intel_context *ctx = container_of(ctx_ref,
-						 typeof(*ctx), ref);
+	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 
 	trace_i915_context_free(ctx);
 
@@ -195,6 +194,7 @@ __create_hw_context(struct drm_device *dev,
 
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->context_list);
+	ctx->i915 = dev_priv;
 
 	if (dev_priv->hw_context_size) {
 		struct drm_i915_gem_object *obj =
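
Note (not part of the patch): the new ctx->i915 assignment stores a back-pointer from the context to the driver's private data, so later code can reach it from the context alone. A stand-alone sketch with illustrative types (not the driver's own):

    /* Illustrative only: once the back-pointer is filled in at creation
     * time, helpers need only the context to reach device-wide state. */
    #include <stdio.h>

    struct fake_device { int hw_context_size; };
    struct fake_context { struct fake_device *i915; };

    static int context_uses_hw_state(const struct fake_context *ctx)
    {
        return ctx->i915->hw_context_size != 0;
    }

    int main(void)
    {
        struct fake_device dev = { .hw_context_size = 4096 };
        struct fake_context ctx = { .i915 = &dev };   /* mirrors ctx->i915 = dev_priv */

        printf("uses hw state: %d\n", context_uses_hw_state(&ctx));
        return 0;
    }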
@@ -573,20 +573,12 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
 					  struct intel_context *from,
 					  struct intel_context *to)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
 	if (to->remap_slice)
 		return false;
 
-	if (to->ppgtt) {
-		if (from == to && !test_bit(ring->id,
-				&to->ppgtt->pd_dirty_rings))
-			return true;
-	} else if (dev_priv->mm.aliasing_ppgtt) {
-		if (from == to && !test_bit(ring->id,
-				&dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
-			return true;
-	}
+	if (to->ppgtt && from == to &&
+	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+		return true;
 
 	return false;
 }
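
Note (not part of the patch): the rewritten should_skip_switch() drops the aliasing-ppgtt branch and tests the per-ring dirty mask directly instead of calling test_bit() on ring->id. A minimal stand-alone C sketch of the mask-based check, assuming intel_ring_flag() evaluates to 1 << ring->id:

    /* Stand-alone demo (not kernel code): per-ring "page directory dirty"
     * tracking with a plain bitmask. */
    #include <stdio.h>

    enum { RCS = 0, VCS = 1, BCS = 2 };

    static unsigned long ring_flag(int ring_id)
    {
        return 1ul << ring_id;              /* stand-in for intel_ring_flag() */
    }

    static int should_skip_switch(int ring_id, int same_context,
                                  unsigned long pd_dirty_rings)
    {
        /* Skip the switch when we stay on the same context and this
         * ring's page directory is not marked dirty. */
        return same_context && !(ring_flag(ring_id) & pd_dirty_rings);
    }

    int main(void)
    {
        unsigned long pd_dirty_rings = ring_flag(BCS);  /* only BCS dirty */

        printf("RCS skip: %d\n", should_skip_switch(RCS, 1, pd_dirty_rings)); /* 1 */
        printf("BCS skip: %d\n", should_skip_switch(BCS, 1, pd_dirty_rings)); /* 0 */
        return 0;
    }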
@@ -636,7 +628,6 @@ static int do_switch(struct intel_engine_cs *ring,
 	struct intel_context *from = ring->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
-	struct i915_vma *vma;
 	int ret, i;
 
 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -673,7 +664,7 @@ static int do_switch(struct intel_engine_cs *ring,
 			goto unpin_out;
 
 		/* Doing a PD load always reloads the page dirs */
-		clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
+		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
 	}
 
 	if (ring != &dev_priv->ring[RCS]) {
@@ -694,16 +685,6 @@ static int do_switch(struct intel_engine_cs *ring,
 	if (ret)
 		goto unpin_out;
 
-	vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
-	if (!(vma->bound & GLOBAL_BIND)) {
-		ret = i915_vma_bind(vma,
-				    to->legacy_hw_ctx.rcs_state->cache_level,
-				    GLOBAL_BIND);
-		/* This shouldn't ever fail. */
-		if (WARN_ONCE(ret, "GGTT context bind failed!"))
-			goto unpin_out;
-	}
-
 	if (!to->legacy_hw_ctx.initialized) {
 		hw_flags |= MI_RESTORE_INHIBIT;
 		/* NB: If we inhibit the restore, the context is not allowed to
@@ -711,12 +692,14 @@ static int do_switch(struct intel_engine_cs *ring,
 		 * space. This means we must enforce that a page table load
 		 * occur when this occurs. */
 	} else if (to->ppgtt &&
-		   test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
 		hw_flags |= MI_FORCE_RESTORE;
+		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+	}
 
 	/* We should never emit switch_mm more than once */
 	WARN_ON(needs_pd_load_pre(ring, to) &&
 		needs_pd_load_post(ring, to, hw_flags));
 
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret)
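
Note (not part of the patch): the atomic test_and_clear_bit() is replaced by an open-coded check-and-clear on the dirty-ring mask, with the clear moved inside the branch that sets MI_FORCE_RESTORE; presumably the mask is already serialized by the driver's existing locking, so atomic bitops are not needed here. A stand-alone sketch of the pattern (flag value illustrative only):

    #include <stdio.h>

    #define MI_FORCE_RESTORE (1u << 0)          /* illustrative value only */

    int main(void)
    {
        unsigned long pd_dirty_rings = 0x1;     /* this ring's bit is set */
        unsigned long ring_mask = 0x1;          /* stand-in for intel_ring_flag(ring) */
        unsigned int hw_flags = 0;

        /* Open-coded replacement for test_and_clear_bit(): check the bit,
         * act on it, then clear only that bit from the mask. */
        if (ring_mask & pd_dirty_rings) {
            hw_flags |= MI_FORCE_RESTORE;
            pd_dirty_rings &= ~ring_mask;
        }

        printf("hw_flags=%#x dirty=%#lx\n", hw_flags, pd_dirty_rings);
        return 0;
    }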
@@ -768,8 +751,6 @@ static int do_switch(struct intel_engine_cs *ring,
 	 * swapped, but there is no way to do that yet.
 	 */
 	from->legacy_hw_ctx.rcs_state->dirty = 1;
-	BUG_ON(i915_gem_request_get_ring(
-			from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
 
 	/* obj is kept alive until the next request by its active ref */
 	i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);