Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c      | 63
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  |  8
3 files changed, 87 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e5daad5f75fb..27ea6bdebce7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	/* Move any buffers on the active list that are no longer referenced
-	 * by the ringbuffer to the flushing/inactive lists as appropriate,
-	 * before we free the context associated with the requests.
+	/* Retire requests first as we use it above for the early return.
+	 * If we retire requests last, we may use a later seqno and so clear
+	 * the requests lists without clearing the active list, leading to
+	 * confusion.
 	 */
-	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_gem_object *obj;
-
-		obj = list_first_entry(&ring->active_list,
-				       struct drm_i915_gem_object,
-				       ring_list);
-
-		if (!i915_gem_request_completed(obj->last_read_req, true))
-			break;
-
-		i915_gem_object_move_to_inactive(obj);
-	}
-
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 		struct intel_ringbuffer *ringbuf;
@@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 		i915_gem_free_request(request);
 	}
 
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate,
+	 * before we free the context associated with the requests.
+	 */
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&ring->active_list,
+				       struct drm_i915_gem_object,
+				       ring_list);
+
+		if (!i915_gem_request_completed(obj->last_read_req, true))
+			break;
+
+		i915_gem_object_move_to_inactive(obj);
+	}
+
 	if (unlikely(ring->trace_irq_req &&
 		     i915_gem_request_completed(ring->trace_irq_req, true))) {
 		ring->irq_put(ring);
@@ -2936,9 +2940,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	req = obj->last_read_req;
 
 	/* Do this after OLR check to make sure we make forward progress polling
-	 * on this IOCTL with a timeout <=0 (like busy ioctl)
+	 * on this IOCTL with a timeout == 0 (like busy ioctl)
 	 */
-	if (args->timeout_ns <= 0) {
+	if (args->timeout_ns == 0) {
 		ret = -ETIME;
 		goto out;
 	}
@@ -2948,7 +2952,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	i915_gem_request_reference(req);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+	ret = __i915_wait_request(req, reset_counter, true,
+				  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 				  file->driver_priv);
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_request_unreference(req);
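
With the two hunks above, the wait ioctl now distinguishes three cases instead of two: timeout_ns == 0 polls and fails with -ETIME if the request is still outstanding, a positive timeout bounds the wait as before, and a negative timeout waits indefinitely, since NULL is passed through to __i915_wait_request(). A minimal userspace sketch of the resulting contract (the struct and ioctl are the real i915 UAPI; gem_wait, fd and handle are illustrative):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Wait for outstanding rendering on a GEM object.
	 * timeout_ns == 0: poll, fails with errno ETIME if still busy;
	 * timeout_ns  > 0: wait at most that long;
	 * timeout_ns  < 0: wait indefinitely.
	 */
	static int gem_wait(int fd, uint32_t handle, int64_t timeout_ns)
	{
		struct drm_i915_gem_wait wait = {
			.bo_handle = handle,
			.timeout_ns = timeout_ns,
		};

		return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	}
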
@@ -4792,6 +4797,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
 
+	/* Double layer security blanket, see i915_gem_init() */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
@@ -4824,7 +4832,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i) {
 		ret = ring->init_hw(ring);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
 	for (i = 0; i < NUM_L3_SLICES(dev); i++)
@@ -4841,9 +4849,11 @@ i915_gem_init_hw(struct drm_device *dev)
 		DRM_ERROR("Context enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
 
-		return ret;
+		goto out;
 	}
 
+out:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	return ret;
 }
 
@@ -4877,6 +4887,14 @@ int i915_gem_init(struct drm_device *dev)
 		dev_priv->gt.stop_ring = intel_logical_ring_stop;
 	}
 
+	/* This is just a security blanket to placate dragons.
+	 * On some systems, we very sporadically observe that the first TLBs
+	 * used by the CS may be stale, despite us poking the TLB reset. If
+	 * we hold the forcewake during initialisation these problems
+	 * just magically go away.
+	 */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
 	ret = i915_gem_init_userptr(dev);
 	if (ret)
 		goto out_unlock;
@@ -4903,6 +4921,7 @@ int i915_gem_init(struct drm_device *dev)
 	}
 
 out_unlock:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
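
The return-to-goto conversions above exist because i915_gem_init_hw() now holds a forcewake reference for its whole body; an early return would leak it. The shape is the usual acquire/release bracket with a single exit label, sketched here with hypothetical step functions (do_step_one/do_step_two are illustrative only):

	static int example_init_hw(struct drm_i915_private *dev_priv)
	{
		int ret;

		/* keep the GT awake for every register access during init */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		ret = do_step_one(dev_priv);	/* hypothetical */
		if (ret)
			goto out;	/* never return with the reference held */

		ret = do_step_two(dev_priv);	/* hypothetical */

	out:
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		return ret;
	}

i915_gem_init() takes the same reference around its call into the rest of init, hence the "double layer security blanket" comment: both layers hold forcewake independently, and each releases only what it took.
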
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e730789b53b7..f75173c20f47 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,6 +37,7 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_crtc_helper.h>
@@ -2416,6 +2417,14 @@ out_unref_obj:
 	return false;
 }
 
+/* Update plane->state->fb to match plane->fb after driver-internal updates */
+static void
+update_state_fb(struct drm_plane *plane)
+{
+	if (plane->fb != plane->state->fb)
+		drm_atomic_set_fb_for_plane(plane->state, plane->fb);
+}
+
 static void
 intel_find_plane_obj(struct intel_crtc *intel_crtc,
 		     struct intel_initial_plane_config *plane_config)
@@ -2429,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
 	if (!intel_crtc->base.primary->fb)
 		return;
 
-	if (intel_alloc_plane_obj(intel_crtc, plane_config))
+	if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
+		struct drm_plane *primary = intel_crtc->base.primary;
+
+		primary->state->crtc = &intel_crtc->base;
+		primary->crtc = &intel_crtc->base;
+		update_state_fb(primary);
+
 		return;
+	}
 
 	kfree(intel_crtc->base.primary->fb);
 	intel_crtc->base.primary->fb = NULL;
@@ -2453,15 +2469,21 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
 			continue;
 
 		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+			struct drm_plane *primary = intel_crtc->base.primary;
+
 			if (obj->tiling_mode != I915_TILING_NONE)
 				dev_priv->preserve_bios_swizzle = true;
 
 			drm_framebuffer_reference(c->primary->fb);
-			intel_crtc->base.primary->fb = c->primary->fb;
+			primary->fb = c->primary->fb;
+			primary->state->crtc = &intel_crtc->base;
+			primary->crtc = &intel_crtc->base;
 			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 			break;
 		}
 	}
+
+	update_state_fb(intel_crtc->base.primary);
 }
 
 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -6602,6 +6624,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 
+	val = I915_READ(DSPCNTR(plane));
+	if (!(val & DISPLAY_PLANE_ENABLE))
+		return;
+
 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
 	if (!intel_fb) {
 		DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -6610,8 +6636,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 
 	fb = &intel_fb->base;
 
-	val = I915_READ(DSPCNTR(plane));
-
 	if (INTEL_INFO(dev)->gen >= 4)
 		if (val & DISPPLANE_TILED)
 			plane_config->tiling = I915_TILING_X;
@@ -7643,6 +7667,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb = &intel_fb->base;
 
 	val = I915_READ(PLANE_CTL(pipe, 0));
+	if (!(val & PLANE_CTL_ENABLE))
+		goto error;
+
 	if (val & PLANE_CTL_TILED_MASK)
 		plane_config->tiling = I915_TILING_X;
 
@@ -7730,6 +7757,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 
+	val = I915_READ(DSPCNTR(pipe));
+	if (!(val & DISPLAY_PLANE_ENABLE))
+		return;
+
 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
 	if (!intel_fb) {
 		DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -7738,8 +7769,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 
 	fb = &intel_fb->base;
 
-	val = I915_READ(DSPCNTR(pipe));
-
 	if (INTEL_INFO(dev)->gen >= 4)
 		if (val & DISPPLANE_TILED)
 			plane_config->tiling = I915_TILING_X;
@@ -9716,7 +9745,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	WARN_ON(!in_irq());
+	WARN_ON(!in_interrupt());
 
 	if (crtc == NULL)
 		return;
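
The in_irq() to in_interrupt() relaxation is needed because in_irq() is true only in hardirq context, while in_interrupt() also covers softirq and NMI context; for example, with forced irq threading the handler runs in a thread with bottom halves disabled, which satisfies in_interrupt() but not in_irq(). For reference, the definitions of this era (include/linux/preempt_mask.h) amount to:

	#define in_irq()	(hardirq_count())	/* hardirq only */
	#define in_interrupt()	(irq_count())		/* hardirq, softirq or NMI */
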
@@ -9816,6 +9845,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	drm_gem_object_reference(&obj->base);
 
 	crtc->primary->fb = fb;
+	update_state_fb(crtc->primary);
 
 	work->pending_flip_obj = obj;
 
@@ -9884,6 +9914,7 @@ cleanup_unpin:
 cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
 	crtc->primary->fb = old_fb;
+	update_state_fb(crtc->primary);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
@@ -13718,6 +13749,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
 				  to_intel_crtc(c)->pipe);
 			drm_framebuffer_unreference(c->primary->fb);
 			c->primary->fb = NULL;
+			update_state_fb(c->primary);
 		}
 	}
 	mutex_unlock(&dev->struct_mutex);
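
All of the update_state_fb() call sites above serve one purpose: the driver is midway through the atomic modeset conversion, so legacy paths still assign plane->fb directly while atomic code reads plane->state->fb, and the helper re-synchronises the two. drm_atomic_set_fb_for_plane() also handles the framebuffer reference counting when the pointers differ. The invariant being restored, as a comment sketch:

	/* after any driver-internal update of the legacy pointer ... */
	plane->fb = new_fb;
	/* ... mirror it into the atomic state so that
	 * plane->state->fb == plane->fb again; the helper takes a
	 * reference on the new fb and drops the old state fb's. */
	update_state_fb(plane);
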
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c47a3baa53d5..4e8fb891d4ea 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 
 	/* We need to init first for ECOBUS access and then
 	 * determine later if we want to reinit, in case of MT access is
-	 * not working
+	 * not working. In this stage we don't know which flavour this
+	 * ivb is, so it is better to reset also the gen6 fw registers
+	 * before the ecobus check.
 	 */
+
+	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
+	__raw_posting_read(dev_priv, ECOBUS);
+
 	fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 		       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
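
For context on this last hunk: on Ivybridge the driver cannot know up front whether multithreaded (MT) forcewake works, so intel_uncore_fw_domains_init() has to probe for it, and the added writes make sure the gen6-style FORCEWAKE register is quiesced before that probe. Abbreviated flow of the probe (condensed from the real function; locking and error handling omitted):

	/* leave neither forcewake flavour enabled before probing */
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	__raw_posting_read(dev_priv, ECOBUS);		/* flush the write */

	/* assume MT forcewake, then verify it took effect via ECOBUS */
	fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
		       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
	ecobus = __raw_i915_read32(dev_priv, ECOBUS);
	if (!(ecobus & FORCEWAKE_MT_ENABLE))		/* MT did not stick, */
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,	/* use gen6 style */
			       FORCEWAKE, FORCEWAKE_ACK);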