aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBen Widawsky <benjamin.widawsky@intel.com>2015-03-16 12:00:55 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2015-03-20 06:48:17 -0400
commit317b4e903636305cfe702ab3e5b3d68547a69e72 (patch)
tree5e910d15ff93ffc2f8548c1dbe22be3dc9e02032
parent07749ef32c4fd60334c2451739460dd1cf600281 (diff)
drm/i915: Extract context switch skip and add pd load logic
In Gen8, PDPs are saved and restored with legacy contexts (legacy contexts only exist on the render ring). So change the ordering of LRI vs MI_SET_CONTEXT for the initialization of the context. Also the only cases in which we need to manually update the PDPs are when MI_RESTORE_INHIBIT has been set in MI_SET_CONTEXT (i.e. when the context is not yet initialized or it is the default context). Legacy submission is not available post GEN8, so it isn't necessary to add extra checks for newer generations. v2: Use new functions to replace the logic right away (Daniel) v3: Add missing pd load logic. v4: Add warning in case pd_load_pre & pd_load_post are true, and add missing trace_switch_mm. Cleaned up pd_load conditions. Add more information about when is pd_load_post needed. (Mika) Signed-off-by: Ben Widawsky <ben@bwidawsk.net> Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+) Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c75
1 file changed, 73 insertions, 2 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 70346b0028f9..b6ea85dcf9e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -569,6 +569,56 @@ mi_set_context(struct intel_engine_cs *ring,
569 return ret; 569 return ret;
570} 570}
571 571
572static inline bool should_skip_switch(struct intel_engine_cs *ring,
573 struct intel_context *from,
574 struct intel_context *to)
575{
576 if (from == to && !to->remap_slice)
577 return true;
578
579 return false;
580}
581
582static bool
583needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
584{
585 struct drm_i915_private *dev_priv = ring->dev->dev_private;
586
587 if (!to->ppgtt)
588 return false;
589
590 if (INTEL_INFO(ring->dev)->gen < 8)
591 return true;
592
593 if (ring != &dev_priv->ring[RCS])
594 return true;
595
596 return false;
597}
598
599static bool
600needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to)
601{
602 struct drm_i915_private *dev_priv = ring->dev->dev_private;
603
604 if (!to->ppgtt)
605 return false;
606
607 if (!IS_GEN8(ring->dev))
608 return false;
609
610 if (ring != &dev_priv->ring[RCS])
611 return false;
612
613 if (!to->legacy_hw_ctx.initialized)
614 return true;
615
616 if (i915_gem_context_is_default(to))
617 return true;
618
619 return false;
620}
621
572static int do_switch(struct intel_engine_cs *ring, 622static int do_switch(struct intel_engine_cs *ring,
573 struct intel_context *to) 623 struct intel_context *to)
574{ 624{
@@ -584,7 +634,7 @@ static int do_switch(struct intel_engine_cs *ring,
584 BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state)); 634 BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
585 } 635 }
586 636
587 if (from == to && !to->remap_slice) 637 if (should_skip_switch(ring, from, to))
588 return 0; 638 return 0;
589 639
590 /* Trying to pin first makes error handling easier. */ 640 /* Trying to pin first makes error handling easier. */
@@ -602,7 +652,14 @@ static int do_switch(struct intel_engine_cs *ring,
602 */ 652 */
603 from = ring->last_context; 653 from = ring->last_context;
604 654
605 if (to->ppgtt) { 655 /* We should never emit switch_mm more than once */
656 WARN_ON(needs_pd_load_pre(ring, to) && needs_pd_load_post(ring, to));
657
658 if (needs_pd_load_pre(ring, to)) {
659 /* Older GENs and non render rings still want the load first,
660 * "PP_DCLV followed by PP_DIR_BASE register through Load
661 * Register Immediate commands in Ring Buffer before submitting
662 * a context."*/
606 trace_switch_mm(ring, to); 663 trace_switch_mm(ring, to);
607 ret = to->ppgtt->switch_mm(to->ppgtt, ring); 664 ret = to->ppgtt->switch_mm(to->ppgtt, ring);
608 if (ret) 665 if (ret)
@@ -644,6 +701,20 @@ static int do_switch(struct intel_engine_cs *ring,
644 if (ret) 701 if (ret)
645 goto unpin_out; 702 goto unpin_out;
646 703
704 if (needs_pd_load_post(ring, to)) {
705 trace_switch_mm(ring, to);
706 ret = to->ppgtt->switch_mm(to->ppgtt, ring);
707 /* The hardware context switch is emitted, but we haven't
708 * actually changed the state - so it's probably safe to bail
709 * here. Still, let the user know something dangerous has
710 * happened.
711 */
712 if (ret) {
713 DRM_ERROR("Failed to change address space on context switch\n");
714 goto unpin_out;
715 }
716 }
717
647 for (i = 0; i < MAX_L3_SLICES; i++) { 718 for (i = 0; i < MAX_L3_SLICES; i++) {
648 if (!(to->remap_slice & (1<<i))) 719 if (!(to->remap_slice & (1<<i)))
649 continue; 720 continue;