author    Ben Widawsky <benjamin.widawsky@intel.com>    2015-03-16 12:00:58 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>        2015-03-20 06:48:19 -0400
commit    6702cf16e0ba8b0129f5aa1b6609d4e9c70bc13b (patch)
tree      603592013e14aeeb0b042f1da3ac70c2ae318482 /drivers/gpu/drm
parent    563222a745012cc2bb20c5d5cbff1c1ec4832c05 (diff)
drm/i915: Initialize all contexts
The problem is we're going to switch to a new context, which could be the default context. The plan was to use restore inhibit, which would be fine, except if we are using dynamic page tables (which we will). If we use dynamic page tables and we don't load new page tables, the previous page tables might go away, and future operations will fault.

CTXA runs.
Switch to default, restore inhibit.
CTXA dies and has its address space taken away.
CTXB runs and tries to save using context A's address space - this fails.

The general solution is to make sure every context has its own state, and its own address space. For cases when we must restore inhibit, the first thing we do is load a valid address space. I thought this would be enough, but apparently there are references within the context itself which refer to the old address space - therefore, we must also reinitialize.

v2: to->ppgtt is only valid in full ppgtt.
v3: Rebased.
v4: Make post PDP update clearer.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+)
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
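In rough C terms, the resulting ordering in do_switch() looks like the sketch below. This is an illustration only, not the driver source: the identifiers mirror those in the diff that follows, while pinning, ring checks and most error paths are omitted, and do_switch_sketch() is a hypothetical name.

/* Illustrative sketch of the hw_flags / page-table ordering after this
 * patch; types and helpers (intel_context, mi_set_context, ...) are the
 * i915 driver's, pinning and most error handling are omitted. */
static int do_switch_sketch(struct intel_engine_cs *ring,
			    struct intel_context *to)
{
	u32 hw_flags = 0;
	int ret;

	if (!to->legacy_hw_ctx.initialized) {
		/* Uninitialized contexts (the default one included) get a
		 * restore-inhibited switch, so a valid address space must be
		 * loaded afterwards: the inhibited image may still reference
		 * the previous context's page tables. */
		hw_flags |= MI_RESTORE_INHIBIT;
	} else if (to->ppgtt &&
		   test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings)) {
		hw_flags |= MI_FORCE_RESTORE;
	}

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		return ret;

	/* With full PPGTT on the render ring, a restore-inhibited switch is
	 * followed by an explicit page-table load so the new context never
	 * runs on a stale (possibly freed) address space. */
	if (needs_pd_load_post(ring, to, hw_flags)) {
		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
		if (ret)
			return ret;
	}

	to->legacy_hw_ctx.initialized = true;
	return 0;
}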
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c  |  28
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index dd9ab36a039b..f3e84c44d009 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -609,7 +609,8 @@ needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
 }
 
 static bool
-needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to)
+needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+		   u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
@@ -622,7 +623,7 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to)
 	if (ring != &dev_priv->ring[RCS])
 		return false;
 
-	if (to->ppgtt->pd_dirty_rings)
+	if (hw_flags & MI_RESTORE_INHIBIT)
 		return true;
 
 	return false;
@@ -661,9 +662,6 @@ static int do_switch(struct intel_engine_cs *ring,
 	 */
 	from = ring->last_context;
 
-	/* We should never emit switch_mm more than once */
-	WARN_ON(needs_pd_load_pre(ring, to) && needs_pd_load_post(ring, to));
-
 	if (needs_pd_load_pre(ring, to)) {
 		/* Older GENs and non render rings still want the load first,
 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
@@ -706,16 +704,28 @@ static int do_switch(struct intel_engine_cs *ring,
 		goto unpin_out;
 	}
 
-	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+	if (!to->legacy_hw_ctx.initialized) {
 		hw_flags |= MI_RESTORE_INHIBIT;
-	else if (to->ppgtt && test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+		/* NB: If we inhibit the restore, the context is not allowed to
+		 * die because future work may end up depending on valid address
+		 * space. This means we must enforce that a page table load
+		 * occur when this occurs. */
+	} else if (to->ppgtt &&
+		   test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
 		hw_flags |= MI_FORCE_RESTORE;
 
+	/* We should never emit switch_mm more than once */
+	WARN_ON(needs_pd_load_pre(ring, to) &&
+		needs_pd_load_post(ring, to, hw_flags));
+
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret)
 		goto unpin_out;
 
-	if (needs_pd_load_post(ring, to)) {
+	/* GEN8 does *not* require an explicit reload if the PDPs have been
+	 * setup, and we do not wish to move them.
+	 */
+	if (needs_pd_load_post(ring, to, hw_flags)) {
 		trace_switch_mm(ring, to);
 		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
 		/* The hardware context switch is emitted, but we haven't
@@ -766,7 +776,7 @@ static int do_switch(struct intel_engine_cs *ring,
 		i915_gem_context_unreference(from);
 	}
 
-	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+	uninitialized = !to->legacy_hw_ctx.initialized;
 	to->legacy_hw_ctx.initialized = true;
 
 done: