Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
 -rw-r--r--  drivers/gpu/drm/i915/intel_display.c  | 849
 1 file changed, 561 insertions, 288 deletions
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bea7f3aef2b0..9228ec018e98 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1095,7 +1095,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
| 1095 | static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) | 1095 | static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) |
| 1096 | { | 1096 | { |
| 1097 | struct drm_i915_private *dev_priv = dev->dev_private; | 1097 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1098 | u32 reg = PIPEDSL(pipe); | 1098 | i915_reg_t reg = PIPEDSL(pipe); |
| 1099 | u32 line1, line2; | 1099 | u32 line1, line2; |
| 1100 | u32 line_mask; | 1100 | u32 line_mask; |
| 1101 | 1101 | ||
@@ -1135,7 +1135,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
| 1135 | enum pipe pipe = crtc->pipe; | 1135 | enum pipe pipe = crtc->pipe; |
| 1136 | 1136 | ||
| 1137 | if (INTEL_INFO(dev)->gen >= 4) { | 1137 | if (INTEL_INFO(dev)->gen >= 4) { |
| 1138 | int reg = PIPECONF(cpu_transcoder); | 1138 | i915_reg_t reg = PIPECONF(cpu_transcoder); |
| 1139 | 1139 | ||
| 1140 | /* Wait for the Pipe State to go off */ | 1140 | /* Wait for the Pipe State to go off */ |
| 1141 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, | 1141 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, |
@@ -1285,7 +1285,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
| 1285 | enum pipe pipe) | 1285 | enum pipe pipe) |
| 1286 | { | 1286 | { |
| 1287 | struct drm_device *dev = dev_priv->dev; | 1287 | struct drm_device *dev = dev_priv->dev; |
| 1288 | int pp_reg; | 1288 | i915_reg_t pp_reg; |
| 1289 | u32 val; | 1289 | u32 val; |
| 1290 | enum pipe panel_pipe = PIPE_A; | 1290 | enum pipe panel_pipe = PIPE_A; |
| 1291 | bool locked = true; | 1291 | bool locked = true; |
@@ -1480,8 +1480,7 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
| 1480 | return false; | 1480 | return false; |
| 1481 | 1481 | ||
| 1482 | if (HAS_PCH_CPT(dev_priv->dev)) { | 1482 | if (HAS_PCH_CPT(dev_priv->dev)) { |
| 1483 | u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); | 1483 | u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe)); |
| 1484 | u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); | ||
| 1485 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) | 1484 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) |
| 1486 | return false; | 1485 | return false; |
| 1487 | } else if (IS_CHERRYVIEW(dev_priv->dev)) { | 1486 | } else if (IS_CHERRYVIEW(dev_priv->dev)) { |
@@ -1545,12 +1544,13 @@ static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
| 1545 | } | 1544 | } |
| 1546 | 1545 | ||
| 1547 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, | 1546 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, |
| 1548 | enum pipe pipe, int reg, u32 port_sel) | 1547 | enum pipe pipe, i915_reg_t reg, |
| 1548 | u32 port_sel) | ||
| 1549 | { | 1549 | { |
| 1550 | u32 val = I915_READ(reg); | 1550 | u32 val = I915_READ(reg); |
| 1551 | I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), | 1551 | I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), |
| 1552 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | 1552 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
| 1553 | reg, pipe_name(pipe)); | 1553 | i915_mmio_reg_offset(reg), pipe_name(pipe)); |
| 1554 | 1554 | ||
| 1555 | I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 | 1555 | I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 |
| 1556 | && (val & DP_PIPEB_SELECT), | 1556 | && (val & DP_PIPEB_SELECT), |
@@ -1558,12 +1558,12 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
| 1558 | } | 1558 | } |
| 1559 | 1559 | ||
| 1560 | static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | 1560 | static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, |
| 1561 | enum pipe pipe, int reg) | 1561 | enum pipe pipe, i915_reg_t reg) |
| 1562 | { | 1562 | { |
| 1563 | u32 val = I915_READ(reg); | 1563 | u32 val = I915_READ(reg); |
| 1564 | I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), | 1564 | I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), |
| 1565 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", | 1565 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", |
| 1566 | reg, pipe_name(pipe)); | 1566 | i915_mmio_reg_offset(reg), pipe_name(pipe)); |
| 1567 | 1567 | ||
| 1568 | I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 | 1568 | I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 |
| 1569 | && (val & SDVO_PIPE_B_SELECT), | 1569 | && (val & SDVO_PIPE_B_SELECT), |
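The recurring change in the hunks above and below is the switch from bare u32/int register offsets to the i915_reg_t handle, with offsets printed through i915_mmio_reg_offset(). Roughly, the idea looks like the following sketch; this is a simplified, userspace-style rendering for illustration, not the driver's exact code, which defines the typedef and the _MMIO() wrapper in its register headers rather than in this file.

#include <stdint.h>

/* Simplified sketch of a typed register handle, not the driver's exact definition. */
typedef struct {
	uint32_t reg;		/* raw MMIO offset */
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}

/*
 * With PIPEDSL(), DPLL(), TRANS_DP_CTL() and friends returning i915_reg_t
 * instead of u32, mixing up a register offset with a register value (or
 * passing one where the other is expected) becomes a compile-time error
 * instead of a silent bug.
 */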
@@ -1599,7 +1599,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
| 1599 | { | 1599 | { |
| 1600 | struct drm_device *dev = crtc->base.dev; | 1600 | struct drm_device *dev = crtc->base.dev; |
| 1601 | struct drm_i915_private *dev_priv = dev->dev_private; | 1601 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1602 | int reg = DPLL(crtc->pipe); | 1602 | i915_reg_t reg = DPLL(crtc->pipe); |
| 1603 | u32 dpll = pipe_config->dpll_hw_state.dpll; | 1603 | u32 dpll = pipe_config->dpll_hw_state.dpll; |
| 1604 | 1604 | ||
| 1605 | assert_pipe_disabled(dev_priv, crtc->pipe); | 1605 | assert_pipe_disabled(dev_priv, crtc->pipe); |
@@ -1688,7 +1688,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
| 1688 | { | 1688 | { |
| 1689 | struct drm_device *dev = crtc->base.dev; | 1689 | struct drm_device *dev = crtc->base.dev; |
| 1690 | struct drm_i915_private *dev_priv = dev->dev_private; | 1690 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1691 | int reg = DPLL(crtc->pipe); | 1691 | i915_reg_t reg = DPLL(crtc->pipe); |
| 1692 | u32 dpll = crtc->config->dpll_hw_state.dpll; | 1692 | u32 dpll = crtc->config->dpll_hw_state.dpll; |
| 1693 | 1693 | ||
| 1694 | assert_pipe_disabled(dev_priv, crtc->pipe); | 1694 | assert_pipe_disabled(dev_priv, crtc->pipe); |
@@ -1837,7 +1837,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
| 1837 | unsigned int expected_mask) | 1837 | unsigned int expected_mask) |
| 1838 | { | 1838 | { |
| 1839 | u32 port_mask; | 1839 | u32 port_mask; |
| 1840 | int dpll_reg; | 1840 | i915_reg_t dpll_reg; |
| 1841 | 1841 | ||
| 1842 | switch (dport->port) { | 1842 | switch (dport->port) { |
| 1843 | case PORT_B: | 1843 | case PORT_B: |
@@ -1962,7 +1962,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
| 1962 | struct drm_device *dev = dev_priv->dev; | 1962 | struct drm_device *dev = dev_priv->dev; |
| 1963 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 1963 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
| 1964 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1964 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 1965 | uint32_t reg, val, pipeconf_val; | 1965 | i915_reg_t reg; |
| 1966 | uint32_t val, pipeconf_val; | ||
| 1966 | 1967 | ||
| 1967 | /* PCH only available on ILK+ */ | 1968 | /* PCH only available on ILK+ */ |
| 1968 | BUG_ON(!HAS_PCH_SPLIT(dev)); | 1969 | BUG_ON(!HAS_PCH_SPLIT(dev)); |
@@ -2051,7 +2052,8 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
| 2051 | enum pipe pipe) | 2052 | enum pipe pipe) |
| 2052 | { | 2053 | { |
| 2053 | struct drm_device *dev = dev_priv->dev; | 2054 | struct drm_device *dev = dev_priv->dev; |
| 2054 | uint32_t reg, val; | 2055 | i915_reg_t reg; |
| 2056 | uint32_t val; | ||
| 2055 | 2057 | ||
| 2056 | /* FDI relies on the transcoder */ | 2058 | /* FDI relies on the transcoder */ |
| 2057 | assert_fdi_tx_disabled(dev_priv, pipe); | 2059 | assert_fdi_tx_disabled(dev_priv, pipe); |
@@ -2068,7 +2070,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
| 2068 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) | 2070 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) |
| 2069 | DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); | 2071 | DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); |
| 2070 | 2072 | ||
| 2071 | if (!HAS_PCH_IBX(dev)) { | 2073 | if (HAS_PCH_CPT(dev)) { |
| 2072 | /* Workaround: Clear the timing override chicken bit again. */ | 2074 | /* Workaround: Clear the timing override chicken bit again. */ |
| 2073 | reg = TRANS_CHICKEN2(pipe); | 2075 | reg = TRANS_CHICKEN2(pipe); |
| 2074 | val = I915_READ(reg); | 2076 | val = I915_READ(reg); |
@@ -2106,10 +2108,9 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
| 2106 | struct drm_device *dev = crtc->base.dev; | 2108 | struct drm_device *dev = crtc->base.dev; |
| 2107 | struct drm_i915_private *dev_priv = dev->dev_private; | 2109 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2108 | enum pipe pipe = crtc->pipe; | 2110 | enum pipe pipe = crtc->pipe; |
| 2109 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | 2111 | enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; |
| 2110 | pipe); | ||
| 2111 | enum pipe pch_transcoder; | 2112 | enum pipe pch_transcoder; |
| 2112 | int reg; | 2113 | i915_reg_t reg; |
| 2113 | u32 val; | 2114 | u32 val; |
| 2114 | 2115 | ||
| 2115 | DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); | 2116 | DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); |
@@ -2170,7 +2171,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
| 2170 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 2171 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 2171 | enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; | 2172 | enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; |
| 2172 | enum pipe pipe = crtc->pipe; | 2173 | enum pipe pipe = crtc->pipe; |
| 2173 | int reg; | 2174 | i915_reg_t reg; |
| 2174 | u32 val; | 2175 | u32 val; |
| 2175 | 2176 | ||
| 2176 | DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); | 2177 | DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); |
@@ -2269,20 +2270,20 @@ intel_fb_align_height(struct drm_device *dev, unsigned int height,
| 2269 | fb_format_modifier, 0)); | 2270 | fb_format_modifier, 0)); |
| 2270 | } | 2271 | } |
| 2271 | 2272 | ||
| 2272 | static int | 2273 | static void |
| 2273 | intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, | 2274 | intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, |
| 2274 | const struct drm_plane_state *plane_state) | 2275 | const struct drm_plane_state *plane_state) |
| 2275 | { | 2276 | { |
| 2276 | struct intel_rotation_info *info = &view->rotation_info; | 2277 | struct intel_rotation_info *info = &view->params.rotation_info; |
| 2277 | unsigned int tile_height, tile_pitch; | 2278 | unsigned int tile_height, tile_pitch; |
| 2278 | 2279 | ||
| 2279 | *view = i915_ggtt_view_normal; | 2280 | *view = i915_ggtt_view_normal; |
| 2280 | 2281 | ||
| 2281 | if (!plane_state) | 2282 | if (!plane_state) |
| 2282 | return 0; | 2283 | return; |
| 2283 | 2284 | ||
| 2284 | if (!intel_rotation_90_or_270(plane_state->rotation)) | 2285 | if (!intel_rotation_90_or_270(plane_state->rotation)) |
| 2285 | return 0; | 2286 | return; |
| 2286 | 2287 | ||
| 2287 | *view = i915_ggtt_view_rotated; | 2288 | *view = i915_ggtt_view_rotated; |
| 2288 | 2289 | ||
@@ -2309,8 +2310,6 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
| 2309 | info->size_uv = info->width_pages_uv * info->height_pages_uv * | 2310 | info->size_uv = info->width_pages_uv * info->height_pages_uv * |
| 2310 | PAGE_SIZE; | 2311 | PAGE_SIZE; |
| 2311 | } | 2312 | } |
| 2312 | |||
| 2313 | return 0; | ||
| 2314 | } | 2313 | } |
| 2315 | 2314 | ||
| 2316 | static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) | 2315 | static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) |
@@ -2329,9 +2328,7 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
| 2329 | int | 2328 | int |
| 2330 | intel_pin_and_fence_fb_obj(struct drm_plane *plane, | 2329 | intel_pin_and_fence_fb_obj(struct drm_plane *plane, |
| 2331 | struct drm_framebuffer *fb, | 2330 | struct drm_framebuffer *fb, |
| 2332 | const struct drm_plane_state *plane_state, | 2331 | const struct drm_plane_state *plane_state) |
| 2333 | struct intel_engine_cs *pipelined, | ||
| 2334 | struct drm_i915_gem_request **pipelined_request) | ||
| 2335 | { | 2332 | { |
| 2336 | struct drm_device *dev = fb->dev; | 2333 | struct drm_device *dev = fb->dev; |
| 2337 | struct drm_i915_private *dev_priv = dev->dev_private; | 2334 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -2366,9 +2363,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
| 2366 | return -EINVAL; | 2363 | return -EINVAL; |
| 2367 | } | 2364 | } |
| 2368 | 2365 | ||
| 2369 | ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); | 2366 | intel_fill_fb_ggtt_view(&view, fb, plane_state); |
| 2370 | if (ret) | ||
| 2371 | return ret; | ||
| 2372 | 2367 | ||
| 2373 | /* Note that the w/a also requires 64 PTE of padding following the | 2368 | /* Note that the w/a also requires 64 PTE of padding following the |
| 2374 | * bo. We currently fill all unused PTE with the shadow page and so | 2369 | * bo. We currently fill all unused PTE with the shadow page and so |
@@ -2387,11 +2382,10 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
| 2387 | */ | 2382 | */ |
| 2388 | intel_runtime_pm_get(dev_priv); | 2383 | intel_runtime_pm_get(dev_priv); |
| 2389 | 2384 | ||
| 2390 | dev_priv->mm.interruptible = false; | 2385 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, |
| 2391 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined, | 2386 | &view); |
| 2392 | pipelined_request, &view); | ||
| 2393 | if (ret) | 2387 | if (ret) |
| 2394 | goto err_interruptible; | 2388 | goto err_pm; |
| 2395 | 2389 | ||
| 2396 | /* Install a fence for tiled scan-out. Pre-i965 always needs a | 2390 | /* Install a fence for tiled scan-out. Pre-i965 always needs a |
| 2397 | * fence, whereas 965+ only requires a fence if using | 2391 | * fence, whereas 965+ only requires a fence if using |
@@ -2417,14 +2411,12 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
| 2417 | i915_gem_object_pin_fence(obj); | 2411 | i915_gem_object_pin_fence(obj); |
| 2418 | } | 2412 | } |
| 2419 | 2413 | ||
| 2420 | dev_priv->mm.interruptible = true; | ||
| 2421 | intel_runtime_pm_put(dev_priv); | 2414 | intel_runtime_pm_put(dev_priv); |
| 2422 | return 0; | 2415 | return 0; |
| 2423 | 2416 | ||
| 2424 | err_unpin: | 2417 | err_unpin: |
| 2425 | i915_gem_object_unpin_from_display_plane(obj, &view); | 2418 | i915_gem_object_unpin_from_display_plane(obj, &view); |
| 2426 | err_interruptible: | 2419 | err_pm: |
| 2427 | dev_priv->mm.interruptible = true; | ||
| 2428 | intel_runtime_pm_put(dev_priv); | 2420 | intel_runtime_pm_put(dev_priv); |
| 2429 | return ret; | 2421 | return ret; |
| 2430 | } | 2422 | } |
@@ -2434,12 +2426,10 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
| 2434 | { | 2426 | { |
| 2435 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 2427 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
| 2436 | struct i915_ggtt_view view; | 2428 | struct i915_ggtt_view view; |
| 2437 | int ret; | ||
| 2438 | 2429 | ||
| 2439 | WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); | 2430 | WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); |
| 2440 | 2431 | ||
| 2441 | ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); | 2432 | intel_fill_fb_ggtt_view(&view, fb, plane_state); |
| 2442 | WARN_ONCE(ret, "Couldn't get view from plane state!"); | ||
| 2443 | 2433 | ||
| 2444 | if (view.type == I915_GGTT_VIEW_NORMAL) | 2434 | if (view.type == I915_GGTT_VIEW_NORMAL) |
| 2445 | i915_gem_object_unpin_fence(obj); | 2435 | i915_gem_object_unpin_fence(obj); |
@@ -2680,7 +2670,7 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
| 2680 | int plane = intel_crtc->plane; | 2670 | int plane = intel_crtc->plane; |
| 2681 | unsigned long linear_offset; | 2671 | unsigned long linear_offset; |
| 2682 | u32 dspcntr; | 2672 | u32 dspcntr; |
| 2683 | u32 reg = DSPCNTR(plane); | 2673 | i915_reg_t reg = DSPCNTR(plane); |
| 2684 | int pixel_size; | 2674 | int pixel_size; |
| 2685 | 2675 | ||
| 2686 | if (!visible || !fb) { | 2676 | if (!visible || !fb) { |
@@ -2810,7 +2800,7 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
| 2810 | int plane = intel_crtc->plane; | 2800 | int plane = intel_crtc->plane; |
| 2811 | unsigned long linear_offset; | 2801 | unsigned long linear_offset; |
| 2812 | u32 dspcntr; | 2802 | u32 dspcntr; |
| 2813 | u32 reg = DSPCNTR(plane); | 2803 | i915_reg_t reg = DSPCNTR(plane); |
| 2814 | int pixel_size; | 2804 | int pixel_size; |
| 2815 | 2805 | ||
| 2816 | if (!visible || !fb) { | 2806 | if (!visible || !fb) { |
@@ -2935,30 +2925,32 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
| 2935 | } | 2925 | } |
| 2936 | } | 2926 | } |
| 2937 | 2927 | ||
| 2938 | unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, | 2928 | u32 intel_plane_obj_offset(struct intel_plane *intel_plane, |
| 2939 | struct drm_i915_gem_object *obj, | 2929 | struct drm_i915_gem_object *obj, |
| 2940 | unsigned int plane) | 2930 | unsigned int plane) |
| 2941 | { | 2931 | { |
| 2942 | const struct i915_ggtt_view *view = &i915_ggtt_view_normal; | 2932 | struct i915_ggtt_view view; |
| 2943 | struct i915_vma *vma; | 2933 | struct i915_vma *vma; |
| 2944 | unsigned char *offset; | 2934 | u64 offset; |
| 2945 | 2935 | ||
| 2946 | if (intel_rotation_90_or_270(intel_plane->base.state->rotation)) | 2936 | intel_fill_fb_ggtt_view(&view, intel_plane->base.fb, |
| 2947 | view = &i915_ggtt_view_rotated; | 2937 | intel_plane->base.state); |
| 2948 | 2938 | ||
| 2949 | vma = i915_gem_obj_to_ggtt_view(obj, view); | 2939 | vma = i915_gem_obj_to_ggtt_view(obj, &view); |
| 2950 | if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n", | 2940 | if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n", |
| 2951 | view->type)) | 2941 | view.type)) |
| 2952 | return -1; | 2942 | return -1; |
| 2953 | 2943 | ||
| 2954 | offset = (unsigned char *)vma->node.start; | 2944 | offset = vma->node.start; |
| 2955 | 2945 | ||
| 2956 | if (plane == 1) { | 2946 | if (plane == 1) { |
| 2957 | offset += vma->ggtt_view.rotation_info.uv_start_page * | 2947 | offset += vma->ggtt_view.params.rotation_info.uv_start_page * |
| 2958 | PAGE_SIZE; | 2948 | PAGE_SIZE; |
| 2959 | } | 2949 | } |
| 2960 | 2950 | ||
| 2961 | return (unsigned long)offset; | 2951 | WARN_ON(upper_32_bits(offset)); |
| 2952 | |||
| 2953 | return lower_32_bits(offset); | ||
| 2962 | } | 2954 | } |
| 2963 | 2955 | ||
| 2964 | static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) | 2956 | static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) |
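In the hunk above, intel_plane_obj_offset() now keeps the GGTT address as a u64, warns if it does not fit in 32 bits, and hands back only the low half. As a reminder of what the two helpers used there do, here is a behavioural sketch; the real macros come from the kernel's <linux/kernel.h>, and the example_ names below are only for illustration.

#include <stdint.h>

/* Behavioural sketch of the kernel's lower_32_bits()/upper_32_bits() helpers. */
static inline uint32_t example_lower_32_bits(uint64_t n)
{
	return (uint32_t)n;		/* bits 31:0 -- what the function returns */
}

static inline uint32_t example_upper_32_bits(uint64_t n)
{
	return (uint32_t)(n >> 32);	/* bits 63:32 -- any non-zero value trips the WARN_ON */
}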
@@ -3084,7 +3076,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
| 3084 | u32 tile_height, plane_offset, plane_size; | 3076 | u32 tile_height, plane_offset, plane_size; |
| 3085 | unsigned int rotation; | 3077 | unsigned int rotation; |
| 3086 | int x_offset, y_offset; | 3078 | int x_offset, y_offset; |
| 3087 | unsigned long surf_addr; | 3079 | u32 surf_addr; |
| 3088 | struct intel_crtc_state *crtc_state = intel_crtc->config; | 3080 | struct intel_crtc_state *crtc_state = intel_crtc->config; |
| 3089 | struct intel_plane_state *plane_state; | 3081 | struct intel_plane_state *plane_state; |
| 3090 | int src_x = 0, src_y = 0, src_w = 0, src_h = 0; | 3082 | int src_x = 0, src_y = 0, src_w = 0, src_h = 0; |
@@ -3212,10 +3204,9 @@ static void intel_update_primary_planes(struct drm_device *dev)
| 3212 | struct intel_plane_state *plane_state; | 3204 | struct intel_plane_state *plane_state; |
| 3213 | 3205 | ||
| 3214 | drm_modeset_lock_crtc(crtc, &plane->base); | 3206 | drm_modeset_lock_crtc(crtc, &plane->base); |
| 3215 | |||
| 3216 | plane_state = to_intel_plane_state(plane->base.state); | 3207 | plane_state = to_intel_plane_state(plane->base.state); |
| 3217 | 3208 | ||
| 3218 | if (plane_state->base.fb) | 3209 | if (crtc->state->active && plane_state->base.fb) |
| 3219 | plane->commit_plane(&plane->base, plane_state); | 3210 | plane->commit_plane(&plane->base, plane_state); |
| 3220 | 3211 | ||
| 3221 | drm_modeset_unlock_crtc(crtc); | 3212 | drm_modeset_unlock_crtc(crtc); |
@@ -3291,32 +3282,6 @@ void intel_finish_reset(struct drm_device *dev)
| 3291 | drm_modeset_unlock_all(dev); | 3282 | drm_modeset_unlock_all(dev); |
| 3292 | } | 3283 | } |
| 3293 | 3284 | ||
| 3294 | static void | ||
| 3295 | intel_finish_fb(struct drm_framebuffer *old_fb) | ||
| 3296 | { | ||
| 3297 | struct drm_i915_gem_object *obj = intel_fb_obj(old_fb); | ||
| 3298 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); | ||
| 3299 | bool was_interruptible = dev_priv->mm.interruptible; | ||
| 3300 | int ret; | ||
| 3301 | |||
| 3302 | /* Big Hammer, we also need to ensure that any pending | ||
| 3303 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | ||
| 3304 | * current scanout is retired before unpinning the old | ||
| 3305 | * framebuffer. Note that we rely on userspace rendering | ||
| 3306 | * into the buffer attached to the pipe they are waiting | ||
| 3307 | * on. If not, userspace generates a GPU hang with IPEHR | ||
| 3308 | * point to the MI_WAIT_FOR_EVENT. | ||
| 3309 | * | ||
| 3310 | * This should only fail upon a hung GPU, in which case we | ||
| 3311 | * can safely continue. | ||
| 3312 | */ | ||
| 3313 | dev_priv->mm.interruptible = false; | ||
| 3314 | ret = i915_gem_object_wait_rendering(obj, true); | ||
| 3315 | dev_priv->mm.interruptible = was_interruptible; | ||
| 3316 | |||
| 3317 | WARN_ON(ret); | ||
| 3318 | } | ||
| 3319 | |||
| 3320 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) | 3285 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) |
| 3321 | { | 3286 | { |
| 3322 | struct drm_device *dev = crtc->dev; | 3287 | struct drm_device *dev = crtc->dev; |
@@ -3386,7 +3351,8 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
| 3386 | struct drm_i915_private *dev_priv = dev->dev_private; | 3351 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3387 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3352 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 3388 | int pipe = intel_crtc->pipe; | 3353 | int pipe = intel_crtc->pipe; |
| 3389 | u32 reg, temp; | 3354 | i915_reg_t reg; |
| 3355 | u32 temp; | ||
| 3390 | 3356 | ||
| 3391 | /* enable normal train */ | 3357 | /* enable normal train */ |
| 3392 | reg = FDI_TX_CTL(pipe); | 3358 | reg = FDI_TX_CTL(pipe); |
@@ -3428,7 +3394,8 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
| 3428 | struct drm_i915_private *dev_priv = dev->dev_private; | 3394 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3429 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3395 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 3430 | int pipe = intel_crtc->pipe; | 3396 | int pipe = intel_crtc->pipe; |
| 3431 | u32 reg, temp, tries; | 3397 | i915_reg_t reg; |
| 3398 | u32 temp, tries; | ||
| 3432 | 3399 | ||
| 3433 | /* FDI needs bits from pipe first */ | 3400 | /* FDI needs bits from pipe first */ |
| 3434 | assert_pipe_enabled(dev_priv, pipe); | 3401 | assert_pipe_enabled(dev_priv, pipe); |
@@ -3528,7 +3495,8 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
| 3528 | struct drm_i915_private *dev_priv = dev->dev_private; | 3495 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3529 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3496 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 3530 | int pipe = intel_crtc->pipe; | 3497 | int pipe = intel_crtc->pipe; |
| 3531 | u32 reg, temp, i, retry; | 3498 | i915_reg_t reg; |
| 3499 | u32 temp, i, retry; | ||
| 3532 | 3500 | ||
| 3533 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit | 3501 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
| 3534 | for train result */ | 3502 | for train result */ |
@@ -3660,7 +3628,8 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
| 3660 | struct drm_i915_private *dev_priv = dev->dev_private; | 3628 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3661 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3629 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 3662 | int pipe = intel_crtc->pipe; | 3630 | int pipe = intel_crtc->pipe; |
| 3663 | u32 reg, temp, i, j; | 3631 | i915_reg_t reg; |
| 3632 | u32 temp, i, j; | ||
| 3664 | 3633 | ||
| 3665 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit | 3634 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
| 3666 | for train result */ | 3635 | for train result */ |
@@ -3777,8 +3746,8 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
| 3777 | struct drm_device *dev = intel_crtc->base.dev; | 3746 | struct drm_device *dev = intel_crtc->base.dev; |
| 3778 | struct drm_i915_private *dev_priv = dev->dev_private; | 3747 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3779 | int pipe = intel_crtc->pipe; | 3748 | int pipe = intel_crtc->pipe; |
| 3780 | u32 reg, temp; | 3749 | i915_reg_t reg; |
| 3781 | 3750 | u32 temp; | |
| 3782 | 3751 | ||
| 3783 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 3752 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
| 3784 | reg = FDI_RX_CTL(pipe); | 3753 | reg = FDI_RX_CTL(pipe); |
@@ -3814,7 +3783,8 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
| 3814 | struct drm_device *dev = intel_crtc->base.dev; | 3783 | struct drm_device *dev = intel_crtc->base.dev; |
| 3815 | struct drm_i915_private *dev_priv = dev->dev_private; | 3784 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3816 | int pipe = intel_crtc->pipe; | 3785 | int pipe = intel_crtc->pipe; |
| 3817 | u32 reg, temp; | 3786 | i915_reg_t reg; |
| 3787 | u32 temp; | ||
| 3818 | 3788 | ||
| 3819 | /* Switch from PCDclk to Rawclk */ | 3789 | /* Switch from PCDclk to Rawclk */ |
| 3820 | reg = FDI_RX_CTL(pipe); | 3790 | reg = FDI_RX_CTL(pipe); |
@@ -3844,7 +3814,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
| 3844 | struct drm_i915_private *dev_priv = dev->dev_private; | 3814 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3845 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3815 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 3846 | int pipe = intel_crtc->pipe; | 3816 | int pipe = intel_crtc->pipe; |
| 3847 | u32 reg, temp; | 3817 | i915_reg_t reg; |
| 3818 | u32 temp; | ||
| 3848 | 3819 | ||
| 3849 | /* disable CPU FDI tx and PCH FDI rx */ | 3820 | /* disable CPU FDI tx and PCH FDI rx */ |
| 3850 | reg = FDI_TX_CTL(pipe); | 3821 | reg = FDI_TX_CTL(pipe); |
@@ -3937,15 +3908,23 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
| 3937 | work->pending_flip_obj); | 3908 | work->pending_flip_obj); |
| 3938 | } | 3909 | } |
| 3939 | 3910 | ||
| 3940 | void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | 3911 | static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
| 3941 | { | 3912 | { |
| 3942 | struct drm_device *dev = crtc->dev; | 3913 | struct drm_device *dev = crtc->dev; |
| 3943 | struct drm_i915_private *dev_priv = dev->dev_private; | 3914 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 3915 | long ret; | ||
| 3944 | 3916 | ||
| 3945 | WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); | 3917 | WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); |
| 3946 | if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue, | 3918 | |
| 3947 | !intel_crtc_has_pending_flip(crtc), | 3919 | ret = wait_event_interruptible_timeout( |
| 3948 | 60*HZ) == 0)) { | 3920 | dev_priv->pending_flip_queue, |
| 3921 | !intel_crtc_has_pending_flip(crtc), | ||
| 3922 | 60*HZ); | ||
| 3923 | |||
| 3924 | if (ret < 0) | ||
| 3925 | return ret; | ||
| 3926 | |||
| 3927 | if (ret == 0) { | ||
| 3949 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3928 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 3950 | 3929 | ||
| 3951 | spin_lock_irq(&dev->event_lock); | 3930 | spin_lock_irq(&dev->event_lock); |
@@ -3956,11 +3935,7 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
| 3956 | spin_unlock_irq(&dev->event_lock); | 3935 | spin_unlock_irq(&dev->event_lock); |
| 3957 | } | 3936 | } |
| 3958 | 3937 | ||
| 3959 | if (crtc->primary->fb) { | 3938 | return 0; |
| 3960 | mutex_lock(&dev->struct_mutex); | ||
| 3961 | intel_finish_fb(crtc->primary->fb); | ||
| 3962 | mutex_unlock(&dev->struct_mutex); | ||
| 3963 | } | ||
| 3964 | } | 3939 | } |
| 3965 | 3940 | ||
| 3966 | /* Program iCLKIP clock to the desired frequency */ | 3941 | /* Program iCLKIP clock to the desired frequency */ |
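The two hunks above turn the pending-flip wait into an interruptible one, so the helper can now fail with -ERESTARTSYS instead of blocking uninterruptibly, and the big-hammer intel_finish_fb() wait on the old framebuffer is dropped. A caller would be expected to propagate the error along these lines; this is a hypothetical sketch only, the real call sites are adjusted elsewhere in the series.

/* Hypothetical caller, illustrating the new error-return contract. */
static int example_prepare_pipe_update(struct drm_crtc *crtc)
{
	int ret;

	ret = intel_crtc_wait_for_pending_flips(crtc);
	if (ret)
		/* A signal arrived while waiting: bubble -ERESTARTSYS up so
		 * the ioctl can be restarted once the signal is handled. */
		return ret;

	/* 0 means either no flip was pending, or the 60 second timeout
	 * path already completed the stuck flip for us. */
	return 0;
}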
@@ -4120,6 +4095,22 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
| 4120 | } | 4095 | } |
| 4121 | } | 4096 | } |
| 4122 | 4097 | ||
| 4098 | /* Return which DP Port should be selected for Transcoder DP control */ | ||
| 4099 | static enum port | ||
| 4100 | intel_trans_dp_port_sel(struct drm_crtc *crtc) | ||
| 4101 | { | ||
| 4102 | struct drm_device *dev = crtc->dev; | ||
| 4103 | struct intel_encoder *encoder; | ||
| 4104 | |||
| 4105 | for_each_encoder_on_crtc(dev, crtc, encoder) { | ||
| 4106 | if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || | ||
| 4107 | encoder->type == INTEL_OUTPUT_EDP) | ||
| 4108 | return enc_to_dig_port(&encoder->base)->port; | ||
| 4109 | } | ||
| 4110 | |||
| 4111 | return -1; | ||
| 4112 | } | ||
| 4113 | |||
| 4123 | /* | 4114 | /* |
| 4124 | * Enable PCH resources required for PCH ports: | 4115 | * Enable PCH resources required for PCH ports: |
| 4125 | * - PCH PLLs | 4116 | * - PCH PLLs |
@@ -4134,7 +4125,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
| 4134 | struct drm_i915_private *dev_priv = dev->dev_private; | 4125 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4135 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4126 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 4136 | int pipe = intel_crtc->pipe; | 4127 | int pipe = intel_crtc->pipe; |
| 4137 | u32 reg, temp; | 4128 | u32 temp; |
| 4138 | 4129 | ||
| 4139 | assert_pch_transcoder_disabled(dev_priv, pipe); | 4130 | assert_pch_transcoder_disabled(dev_priv, pipe); |
| 4140 | 4131 | ||
@@ -4181,8 +4172,10 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
| 4181 | 4172 | ||
| 4182 | /* For PCH DP, enable TRANS_DP_CTL */ | 4173 | /* For PCH DP, enable TRANS_DP_CTL */ |
| 4183 | if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { | 4174 | if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { |
| 4175 | const struct drm_display_mode *adjusted_mode = | ||
| 4176 | &intel_crtc->config->base.adjusted_mode; | ||
| 4184 | u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; | 4177 | u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; |
| 4185 | reg = TRANS_DP_CTL(pipe); | 4178 | i915_reg_t reg = TRANS_DP_CTL(pipe); |
| 4186 | temp = I915_READ(reg); | 4179 | temp = I915_READ(reg); |
| 4187 | temp &= ~(TRANS_DP_PORT_SEL_MASK | | 4180 | temp &= ~(TRANS_DP_PORT_SEL_MASK | |
| 4188 | TRANS_DP_SYNC_MASK | | 4181 | TRANS_DP_SYNC_MASK | |
@@ -4190,19 +4183,19 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
| 4190 | temp |= TRANS_DP_OUTPUT_ENABLE; | 4183 | temp |= TRANS_DP_OUTPUT_ENABLE; |
| 4191 | temp |= bpc << 9; /* same format but at 11:9 */ | 4184 | temp |= bpc << 9; /* same format but at 11:9 */ |
| 4192 | 4185 | ||
| 4193 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | 4186 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
| 4194 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; | 4187 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; |
| 4195 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) | 4188 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
| 4196 | temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; | 4189 | temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; |
| 4197 | 4190 | ||
| 4198 | switch (intel_trans_dp_port_sel(crtc)) { | 4191 | switch (intel_trans_dp_port_sel(crtc)) { |
| 4199 | case PCH_DP_B: | 4192 | case PORT_B: |
| 4200 | temp |= TRANS_DP_PORT_SEL_B; | 4193 | temp |= TRANS_DP_PORT_SEL_B; |
| 4201 | break; | 4194 | break; |
| 4202 | case PCH_DP_C: | 4195 | case PORT_C: |
| 4203 | temp |= TRANS_DP_PORT_SEL_C; | 4196 | temp |= TRANS_DP_PORT_SEL_C; |
| 4204 | break; | 4197 | break; |
| 4205 | case PCH_DP_D: | 4198 | case PORT_D: |
| 4206 | temp |= TRANS_DP_PORT_SEL_D; | 4199 | temp |= TRANS_DP_PORT_SEL_D; |
| 4207 | break; | 4200 | break; |
| 4208 | default: | 4201 | default: |
@@ -4342,7 +4335,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
| 4342 | static void cpt_verify_modeset(struct drm_device *dev, int pipe) | 4335 | static void cpt_verify_modeset(struct drm_device *dev, int pipe) |
| 4343 | { | 4336 | { |
| 4344 | struct drm_i915_private *dev_priv = dev->dev_private; | 4337 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4345 | int dslreg = PIPEDSL(pipe); | 4338 | i915_reg_t dslreg = PIPEDSL(pipe); |
| 4346 | u32 temp; | 4339 | u32 temp; |
| 4347 | 4340 | ||
| 4348 | temp = I915_READ(dslreg); | 4341 | temp = I915_READ(dslreg); |
@@ -4652,7 +4645,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
| 4652 | } | 4645 | } |
| 4653 | 4646 | ||
| 4654 | for (i = 0; i < 256; i++) { | 4647 | for (i = 0; i < 256; i++) { |
| 4655 | u32 palreg; | 4648 | i915_reg_t palreg; |
| 4656 | 4649 | ||
| 4657 | if (HAS_GMCH_DISPLAY(dev)) | 4650 | if (HAS_GMCH_DISPLAY(dev)) |
| 4658 | palreg = PALETTE(pipe, i); | 4651 | palreg = PALETTE(pipe, i); |
@@ -4731,9 +4724,9 @@ intel_post_enable_primary(struct drm_crtc *crtc)
| 4731 | if (IS_GEN2(dev)) | 4724 | if (IS_GEN2(dev)) |
| 4732 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); | 4725 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); |
| 4733 | 4726 | ||
| 4734 | /* Underruns don't raise interrupts, so check manually. */ | 4727 | /* Underruns don't always raise interrupts, so check manually. */ |
| 4735 | if (HAS_GMCH_DISPLAY(dev)) | 4728 | intel_check_cpu_fifo_underruns(dev_priv); |
| 4736 | i9xx_check_fifo_underruns(dev_priv); | 4729 | intel_check_pch_fifo_underruns(dev_priv); |
| 4737 | } | 4730 | } |
| 4738 | 4731 | ||
| 4739 | /** | 4732 | /** |
@@ -4792,7 +4785,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
| 4792 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; | 4785 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; |
| 4793 | struct drm_device *dev = crtc->base.dev; | 4786 | struct drm_device *dev = crtc->base.dev; |
| 4794 | struct drm_i915_private *dev_priv = dev->dev_private; | 4787 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4795 | struct drm_plane *plane; | ||
| 4796 | 4788 | ||
| 4797 | if (atomic->wait_vblank) | 4789 | if (atomic->wait_vblank) |
| 4798 | intel_wait_for_vblank(dev, crtc->pipe); | 4790 | intel_wait_for_vblank(dev, crtc->pipe); |
@@ -4811,10 +4803,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
| 4811 | if (atomic->post_enable_primary) | 4803 | if (atomic->post_enable_primary) |
| 4812 | intel_post_enable_primary(&crtc->base); | 4804 | intel_post_enable_primary(&crtc->base); |
| 4813 | 4805 | ||
| 4814 | drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks) | ||
| 4815 | intel_update_sprite_watermarks(plane, &crtc->base, | ||
| 4816 | 0, 0, 0, false, false); | ||
| 4817 | |||
| 4818 | memset(atomic, 0, sizeof(*atomic)); | 4806 | memset(atomic, 0, sizeof(*atomic)); |
| 4819 | } | 4807 | } |
| 4820 | 4808 | ||
@@ -4823,20 +4811,6 @@ static void intel_pre_plane_update(struct intel_crtc *crtc)
| 4823 | struct drm_device *dev = crtc->base.dev; | 4811 | struct drm_device *dev = crtc->base.dev; |
| 4824 | struct drm_i915_private *dev_priv = dev->dev_private; | 4812 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4825 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; | 4813 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; |
| 4826 | struct drm_plane *p; | ||
| 4827 | |||
| 4828 | /* Track fb's for any planes being disabled */ | ||
| 4829 | drm_for_each_plane_mask(p, dev, atomic->disabled_planes) { | ||
| 4830 | struct intel_plane *plane = to_intel_plane(p); | ||
| 4831 | |||
| 4832 | mutex_lock(&dev->struct_mutex); | ||
| 4833 | i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL, | ||
| 4834 | plane->frontbuffer_bit); | ||
| 4835 | mutex_unlock(&dev->struct_mutex); | ||
| 4836 | } | ||
| 4837 | |||
| 4838 | if (atomic->wait_for_flips) | ||
| 4839 | intel_crtc_wait_for_pending_flips(&crtc->base); | ||
| 4840 | 4814 | ||
| 4841 | if (atomic->disable_fbc) | 4815 | if (atomic->disable_fbc) |
| 4842 | intel_fbc_disable_crtc(crtc); | 4816 | intel_fbc_disable_crtc(crtc); |
@@ -4885,6 +4859,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
| 4885 | return; | 4859 | return; |
| 4886 | 4860 | ||
| 4887 | if (intel_crtc->config->has_pch_encoder) | 4861 | if (intel_crtc->config->has_pch_encoder) |
| 4862 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); | ||
| 4863 | |||
| 4864 | if (intel_crtc->config->has_pch_encoder) | ||
| 4888 | intel_prepare_shared_dpll(intel_crtc); | 4865 | intel_prepare_shared_dpll(intel_crtc); |
| 4889 | 4866 | ||
| 4890 | if (intel_crtc->config->has_dp_encoder) | 4867 | if (intel_crtc->config->has_dp_encoder) |
@@ -4902,7 +4879,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
| 4902 | intel_crtc->active = true; | 4879 | intel_crtc->active = true; |
| 4903 | 4880 | ||
| 4904 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); | 4881 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); |
| 4905 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); | ||
| 4906 | 4882 | ||
| 4907 | for_each_encoder_on_crtc(dev, crtc, encoder) | 4883 | for_each_encoder_on_crtc(dev, crtc, encoder) |
| 4908 | if (encoder->pre_enable) | 4884 | if (encoder->pre_enable) |
@@ -4940,6 +4916,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
| 4940 | 4916 | ||
| 4941 | if (HAS_PCH_CPT(dev)) | 4917 | if (HAS_PCH_CPT(dev)) |
| 4942 | cpt_verify_modeset(dev, intel_crtc->pipe); | 4918 | cpt_verify_modeset(dev, intel_crtc->pipe); |
| 4919 | |||
| 4920 | /* Must wait for vblank to avoid spurious PCH FIFO underruns */ | ||
| 4921 | if (intel_crtc->config->has_pch_encoder) | ||
| 4922 | intel_wait_for_vblank(dev, pipe); | ||
| 4923 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); | ||
| 4943 | } | 4924 | } |
| 4944 | 4925 | ||
| 4945 | /* IPS only exists on ULT machines and is tied to pipe A. */ | 4926 | /* IPS only exists on ULT machines and is tied to pipe A. */ |
@@ -4962,6 +4943,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
| 4962 | if (WARN_ON(intel_crtc->active)) | 4943 | if (WARN_ON(intel_crtc->active)) |
| 4963 | return; | 4944 | return; |
| 4964 | 4945 | ||
| 4946 | if (intel_crtc->config->has_pch_encoder) | ||
| 4947 | intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, | ||
| 4948 | false); | ||
| 4949 | |||
| 4965 | if (intel_crtc_to_shared_dpll(intel_crtc)) | 4950 | if (intel_crtc_to_shared_dpll(intel_crtc)) |
| 4966 | intel_enable_shared_dpll(intel_crtc); | 4951 | intel_enable_shared_dpll(intel_crtc); |
| 4967 | 4952 | ||
@@ -4994,11 +4979,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
| 4994 | encoder->pre_enable(encoder); | 4979 | encoder->pre_enable(encoder); |
| 4995 | } | 4980 | } |
| 4996 | 4981 | ||
| 4997 | if (intel_crtc->config->has_pch_encoder) { | 4982 | if (intel_crtc->config->has_pch_encoder) |
| 4998 | intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, | ||
| 4999 | true); | ||
| 5000 | dev_priv->display.fdi_link_train(crtc); | 4983 | dev_priv->display.fdi_link_train(crtc); |
| 5001 | } | ||
| 5002 | 4984 | ||
| 5003 | if (!is_dsi) | 4985 | if (!is_dsi) |
| 5004 | intel_ddi_enable_pipe_clock(intel_crtc); | 4986 | intel_ddi_enable_pipe_clock(intel_crtc); |
@@ -5035,6 +5017,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
| 5035 | intel_opregion_notify_encoder(encoder, true); | 5017 | intel_opregion_notify_encoder(encoder, true); |
| 5036 | } | 5018 | } |
| 5037 | 5019 | ||
| 5020 | if (intel_crtc->config->has_pch_encoder) | ||
| 5021 | intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, | ||
| 5022 | true); | ||
| 5023 | |||
| 5038 | /* If we change the relative order between pipe/planes enabling, we need | 5024 | /* If we change the relative order between pipe/planes enabling, we need |
| 5039 | * to change the workaround. */ | 5025 | * to change the workaround. */ |
| 5040 | hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; | 5026 | hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; |
@@ -5066,7 +5052,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
| 5066 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5052 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 5067 | struct intel_encoder *encoder; | 5053 | struct intel_encoder *encoder; |
| 5068 | int pipe = intel_crtc->pipe; | 5054 | int pipe = intel_crtc->pipe; |
| 5069 | u32 reg, temp; | 5055 | |
| 5056 | if (intel_crtc->config->has_pch_encoder) | ||
| 5057 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); | ||
| 5070 | 5058 | ||
| 5071 | for_each_encoder_on_crtc(dev, crtc, encoder) | 5059 | for_each_encoder_on_crtc(dev, crtc, encoder) |
| 5072 | encoder->disable(encoder); | 5060 | encoder->disable(encoder); |
@@ -5074,9 +5062,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
| 5074 | drm_crtc_vblank_off(crtc); | 5062 | drm_crtc_vblank_off(crtc); |
| 5075 | assert_vblank_disabled(crtc); | 5063 | assert_vblank_disabled(crtc); |
| 5076 | 5064 | ||
| 5077 | if (intel_crtc->config->has_pch_encoder) | ||
| 5078 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); | ||
| 5079 | |||
| 5080 | intel_disable_pipe(intel_crtc); | 5065 | intel_disable_pipe(intel_crtc); |
| 5081 | 5066 | ||
| 5082 | ironlake_pfit_disable(intel_crtc, false); | 5067 | ironlake_pfit_disable(intel_crtc, false); |
@@ -5092,6 +5077,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
| 5092 | ironlake_disable_pch_transcoder(dev_priv, pipe); | 5077 | ironlake_disable_pch_transcoder(dev_priv, pipe); |
| 5093 | 5078 | ||
| 5094 | if (HAS_PCH_CPT(dev)) { | 5079 | if (HAS_PCH_CPT(dev)) { |
| 5080 | i915_reg_t reg; | ||
| 5081 | u32 temp; | ||
| 5082 | |||
| 5095 | /* disable TRANS_DP_CTL */ | 5083 | /* disable TRANS_DP_CTL */ |
| 5096 | reg = TRANS_DP_CTL(pipe); | 5084 | reg = TRANS_DP_CTL(pipe); |
| 5097 | temp = I915_READ(reg); | 5085 | temp = I915_READ(reg); |
@@ -5108,6 +5096,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
| 5108 | 5096 | ||
| 5109 | ironlake_fdi_pll_disable(intel_crtc); | 5097 | ironlake_fdi_pll_disable(intel_crtc); |
| 5110 | } | 5098 | } |
| 5099 | |||
| 5100 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); | ||
| 5111 | } | 5101 | } |
| 5112 | 5102 | ||
| 5113 | static void haswell_crtc_disable(struct drm_crtc *crtc) | 5103 | static void haswell_crtc_disable(struct drm_crtc *crtc) |
| @@ -5119,6 +5109,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
| 5119 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | 5109 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; |
| 5120 | bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); | 5110 | bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); |
| 5121 | 5111 | ||
| 5112 | if (intel_crtc->config->has_pch_encoder) | ||
| 5113 | intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, | ||
| 5114 | false); | ||
| 5115 | |||
| 5122 | for_each_encoder_on_crtc(dev, crtc, encoder) { | 5116 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
| 5123 | intel_opregion_notify_encoder(encoder, false); | 5117 | intel_opregion_notify_encoder(encoder, false); |
| 5124 | encoder->disable(encoder); | 5118 | encoder->disable(encoder); |
@@ -5127,9 +5121,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
| 5127 | drm_crtc_vblank_off(crtc); | 5121 | drm_crtc_vblank_off(crtc); |
| 5128 | assert_vblank_disabled(crtc); | 5122 | assert_vblank_disabled(crtc); |
| 5129 | 5123 | ||
| 5130 | if (intel_crtc->config->has_pch_encoder) | ||
| 5131 | intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, | ||
| 5132 | false); | ||
| 5133 | intel_disable_pipe(intel_crtc); | 5124 | intel_disable_pipe(intel_crtc); |
| 5134 | 5125 | ||
| 5135 | if (intel_crtc->config->dp_encoder_is_mst) | 5126 | if (intel_crtc->config->dp_encoder_is_mst) |
@@ -5154,6 +5145,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
| 5154 | for_each_encoder_on_crtc(dev, crtc, encoder) | 5145 | for_each_encoder_on_crtc(dev, crtc, encoder) |
| 5155 | if (encoder->post_disable) | 5146 | if (encoder->post_disable) |
| 5156 | encoder->post_disable(encoder); | 5147 | encoder->post_disable(encoder); |
| 5148 | |||
| 5149 | if (intel_crtc->config->has_pch_encoder) | ||
| 5150 | intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, | ||
| 5151 | true); | ||
| 5157 | } | 5152 | } |
| 5158 | 5153 | ||
| 5159 | static void i9xx_pfit_enable(struct intel_crtc *crtc) | 5154 | static void i9xx_pfit_enable(struct intel_crtc *crtc) |
@@ -5184,21 +5179,41 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
| 5184 | { | 5179 | { |
| 5185 | switch (port) { | 5180 | switch (port) { |
| 5186 | case PORT_A: | 5181 | case PORT_A: |
| 5187 | return POWER_DOMAIN_PORT_DDI_A_4_LANES; | 5182 | return POWER_DOMAIN_PORT_DDI_A_LANES; |
| 5188 | case PORT_B: | 5183 | case PORT_B: |
| 5189 | return POWER_DOMAIN_PORT_DDI_B_4_LANES; | 5184 | return POWER_DOMAIN_PORT_DDI_B_LANES; |
| 5190 | case PORT_C: | 5185 | case PORT_C: |
| 5191 | return POWER_DOMAIN_PORT_DDI_C_4_LANES; | 5186 | return POWER_DOMAIN_PORT_DDI_C_LANES; |
| 5192 | case PORT_D: | 5187 | case PORT_D: |
| 5193 | return POWER_DOMAIN_PORT_DDI_D_4_LANES; | 5188 | return POWER_DOMAIN_PORT_DDI_D_LANES; |
| 5194 | case PORT_E: | 5189 | case PORT_E: |
| 5195 | return POWER_DOMAIN_PORT_DDI_E_2_LANES; | 5190 | return POWER_DOMAIN_PORT_DDI_E_LANES; |
| 5196 | default: | 5191 | default: |
| 5197 | WARN_ON_ONCE(1); | 5192 | MISSING_CASE(port); |
| 5198 | return POWER_DOMAIN_PORT_OTHER; | 5193 | return POWER_DOMAIN_PORT_OTHER; |
| 5199 | } | 5194 | } |
| 5200 | } | 5195 | } |
| 5201 | 5196 | ||
| 5197 | static enum intel_display_power_domain port_to_aux_power_domain(enum port port) | ||
| 5198 | { | ||
| 5199 | switch (port) { | ||
| 5200 | case PORT_A: | ||
| 5201 | return POWER_DOMAIN_AUX_A; | ||
| 5202 | case PORT_B: | ||
| 5203 | return POWER_DOMAIN_AUX_B; | ||
| 5204 | case PORT_C: | ||
| 5205 | return POWER_DOMAIN_AUX_C; | ||
| 5206 | case PORT_D: | ||
| 5207 | return POWER_DOMAIN_AUX_D; | ||
| 5208 | case PORT_E: | ||
| 5209 | /* FIXME: Check VBT for actual wiring of PORT E */ | ||
| 5210 | return POWER_DOMAIN_AUX_D; | ||
| 5211 | default: | ||
| 5212 | MISSING_CASE(port); | ||
| 5213 | return POWER_DOMAIN_AUX_A; | ||
| 5214 | } | ||
| 5215 | } | ||
| 5216 | |||
| 5202 | #define for_each_power_domain(domain, mask) \ | 5217 | #define for_each_power_domain(domain, mask) \ |
| 5203 | for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ | 5218 | for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ |
| 5204 | if ((1 << (domain)) & (mask)) | 5219 | if ((1 << (domain)) & (mask)) |
@@ -5230,6 +5245,36 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
| 5230 | } | 5245 | } |
| 5231 | } | 5246 | } |
| 5232 | 5247 | ||
| 5248 | enum intel_display_power_domain | ||
| 5249 | intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder) | ||
| 5250 | { | ||
| 5251 | struct drm_device *dev = intel_encoder->base.dev; | ||
| 5252 | struct intel_digital_port *intel_dig_port; | ||
| 5253 | |||
| 5254 | switch (intel_encoder->type) { | ||
| 5255 | case INTEL_OUTPUT_UNKNOWN: | ||
| 5256 | case INTEL_OUTPUT_HDMI: | ||
| 5257 | /* | ||
| 5258 | * Only DDI platforms should ever use these output types. | ||
| 5259 | * We can get here after the HDMI detect code has already set | ||
| 5260 | * the type of the shared encoder. Since we can't be sure | ||
| 5261 | * what's the status of the given connectors, play safe and | ||
| 5262 | * run the DP detection too. | ||
| 5263 | */ | ||
| 5264 | WARN_ON_ONCE(!HAS_DDI(dev)); | ||
| 5265 | case INTEL_OUTPUT_DISPLAYPORT: | ||
| 5266 | case INTEL_OUTPUT_EDP: | ||
| 5267 | intel_dig_port = enc_to_dig_port(&intel_encoder->base); | ||
| 5268 | return port_to_aux_power_domain(intel_dig_port->port); | ||
| 5269 | case INTEL_OUTPUT_DP_MST: | ||
| 5270 | intel_dig_port = enc_to_mst(&intel_encoder->base)->primary; | ||
| 5271 | return port_to_aux_power_domain(intel_dig_port->port); | ||
| 5272 | default: | ||
| 5273 | MISSING_CASE(intel_encoder->type); | ||
| 5274 | return POWER_DOMAIN_AUX_A; | ||
| 5275 | } | ||
| 5276 | } | ||
| 5277 | |||
| 5233 | static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) | 5278 | static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) |
| 5234 | { | 5279 | { |
| 5235 | struct drm_device *dev = crtc->dev; | 5280 | struct drm_device *dev = crtc->dev; |
@@ -5237,13 +5282,11 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
| 5237 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5282 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 5238 | enum pipe pipe = intel_crtc->pipe; | 5283 | enum pipe pipe = intel_crtc->pipe; |
| 5239 | unsigned long mask; | 5284 | unsigned long mask; |
| 5240 | enum transcoder transcoder; | 5285 | enum transcoder transcoder = intel_crtc->config->cpu_transcoder; |
| 5241 | 5286 | ||
| 5242 | if (!crtc->state->active) | 5287 | if (!crtc->state->active) |
| 5243 | return 0; | 5288 | return 0; |
| 5244 | 5289 | ||
| 5245 | transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); | ||
| 5246 | |||
| 5247 | mask = BIT(POWER_DOMAIN_PIPE(pipe)); | 5290 | mask = BIT(POWER_DOMAIN_PIPE(pipe)); |
| 5248 | mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); | 5291 | mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); |
| 5249 | if (intel_crtc->config->pch_pfit.enabled || | 5292 | if (intel_crtc->config->pch_pfit.enabled || |
@@ -5330,7 +5373,7 @@ static void intel_update_max_cdclk(struct drm_device *dev)
| 5330 | { | 5373 | { |
| 5331 | struct drm_i915_private *dev_priv = dev->dev_private; | 5374 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5332 | 5375 | ||
| 5333 | if (IS_SKYLAKE(dev)) { | 5376 | if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { |
| 5334 | u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; | 5377 | u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; |
| 5335 | 5378 | ||
| 5336 | if (limit == SKL_DFSM_CDCLK_LIMIT_675) | 5379 | if (limit == SKL_DFSM_CDCLK_LIMIT_675) |
@@ -5747,32 +5790,16 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
| 5747 | if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) | 5790 | if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) |
| 5748 | DRM_ERROR("DBuf power disable timeout\n"); | 5791 | DRM_ERROR("DBuf power disable timeout\n"); |
| 5749 | 5792 | ||
| 5750 | /* | 5793 | /* disable DPLL0 */ |
| 5751 | * DMC assumes ownership of LCPLL and will get confused if we touch it. | 5794 | I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); |
| 5752 | */ | 5795 | if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) |
| 5753 | if (dev_priv->csr.dmc_payload) { | 5796 | DRM_ERROR("Couldn't disable DPLL0\n"); |
| 5754 | /* disable DPLL0 */ | ||
| 5755 | I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & | ||
| 5756 | ~LCPLL_PLL_ENABLE); | ||
| 5757 | if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) | ||
| 5758 | DRM_ERROR("Couldn't disable DPLL0\n"); | ||
| 5759 | } | ||
| 5760 | |||
| 5761 | intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); | ||
| 5762 | } | 5797 | } |
| 5763 | 5798 | ||
| 5764 | void skl_init_cdclk(struct drm_i915_private *dev_priv) | 5799 | void skl_init_cdclk(struct drm_i915_private *dev_priv) |
| 5765 | { | 5800 | { |
| 5766 | u32 val; | ||
| 5767 | unsigned int required_vco; | 5801 | unsigned int required_vco; |
| 5768 | 5802 | ||
| 5769 | /* enable PCH reset handshake */ | ||
| 5770 | val = I915_READ(HSW_NDE_RSTWRN_OPT); | ||
| 5771 | I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE); | ||
| 5772 | |||
| 5773 | /* enable PG1 and Misc I/O */ | ||
| 5774 | intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); | ||
| 5775 | |||
| 5776 | /* DPLL0 not enabled (happens on early BIOS versions) */ | 5803 | /* DPLL0 not enabled (happens on early BIOS versions) */ |
| 5777 | if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { | 5804 | if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { |
| 5778 | /* enable DPLL0 */ | 5805 | /* enable DPLL0 */ |
@@ -5793,6 +5820,45 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
| 5793 | DRM_ERROR("DBuf power enable timeout\n"); | 5820 | DRM_ERROR("DBuf power enable timeout\n"); |
| 5794 | } | 5821 | } |
| 5795 | 5822 | ||
| 5823 | int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) | ||
| 5824 | { | ||
| 5825 | uint32_t lcpll1 = I915_READ(LCPLL1_CTL); | ||
| 5826 | uint32_t cdctl = I915_READ(CDCLK_CTL); | ||
| 5827 | int freq = dev_priv->skl_boot_cdclk; | ||
| 5828 | |||
| 5829 | /* | ||
| 5830 | * check if the pre-os intialized the display | ||
| 5831 | * There is SWF18 scratchpad register defined which is set by the | ||
| 5832 | * pre-os which can be used by the OS drivers to check the status | ||
| 5833 | */ | ||
| 5834 | if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) | ||
| 5835 | goto sanitize; | ||
| 5836 | |||
| 5837 | /* Is PLL enabled and locked ? */ | ||
| 5838 | if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK))) | ||
| 5839 | goto sanitize; | ||
| 5840 | |||
| 5841 | /* DPLL okay; verify the cdclock | ||
| 5842 | * | ||
| 5843 | * Noticed in some instances that the freq selection is correct but | ||
| 5844 | * decimal part is programmed wrong from BIOS where pre-os does not | ||
| 5845 | * enable display. Verify the same as well. | ||
| 5846 | */ | ||
| 5847 | if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq))) | ||
| 5848 | /* All well; nothing to sanitize */ | ||
| 5849 | return false; | ||
| 5850 | sanitize: | ||
| 5851 | /* | ||
| 5852 | * As of now initialize with max cdclk till | ||
| 5853 | * we get dynamic cdclk support | ||
| 5854 | * */ | ||
| 5855 | dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq; | ||
| 5856 | skl_init_cdclk(dev_priv); | ||
| 5857 | |||
| 5858 | /* we did have to sanitize */ | ||
| 5859 | return true; | ||
| 5860 | } | ||
| 5861 | |||
| 5796 | /* Adjust CDclk dividers to allow high res or save power if possible */ | 5862 | /* Adjust CDclk dividers to allow high res or save power if possible */ |
| 5797 | static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) | 5863 | static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) |
| 5798 | { | 5864 | { |
@@ -6257,7 +6323,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
| 6257 | return; | 6323 | return; |
| 6258 | 6324 | ||
| 6259 | if (to_intel_plane_state(crtc->primary->state)->visible) { | 6325 | if (to_intel_plane_state(crtc->primary->state)->visible) { |
| 6260 | intel_crtc_wait_for_pending_flips(crtc); | 6326 | WARN_ON(intel_crtc->unpin_work); |
| 6327 | |||
| 6261 | intel_pre_disable_primary(crtc); | 6328 | intel_pre_disable_primary(crtc); |
| 6262 | } | 6329 | } |
| 6263 | 6330 | ||
| @@ -6575,6 +6642,15 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc, | |||
| 6575 | pipe_config_supports_ips(dev_priv, pipe_config); | 6642 | pipe_config_supports_ips(dev_priv, pipe_config); |
| 6576 | } | 6643 | } |
| 6577 | 6644 | ||
| 6645 | static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) | ||
| 6646 | { | ||
| 6647 | const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
| 6648 | |||
| 6649 | /* GDG double wide on either pipe, otherwise pipe A only */ | ||
| 6650 | return INTEL_INFO(dev_priv)->gen < 4 && | ||
| 6651 | (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); | ||
| 6652 | } | ||
| 6653 | |||
| 6578 | static int intel_crtc_compute_config(struct intel_crtc *crtc, | 6654 | static int intel_crtc_compute_config(struct intel_crtc *crtc, |
| 6579 | struct intel_crtc_state *pipe_config) | 6655 | struct intel_crtc_state *pipe_config) |
| 6580 | { | 6656 | { |
| @@ -6584,23 +6660,24 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, | |||
| 6584 | 6660 | ||
| 6585 | /* FIXME should check pixel clock limits on all platforms */ | 6661 | /* FIXME should check pixel clock limits on all platforms */ |
| 6586 | if (INTEL_INFO(dev)->gen < 4) { | 6662 | if (INTEL_INFO(dev)->gen < 4) { |
| 6587 | int clock_limit = dev_priv->max_cdclk_freq; | 6663 | int clock_limit = dev_priv->max_cdclk_freq * 9 / 10; |
| 6588 | 6664 | ||
| 6589 | /* | 6665 | /* |
| 6590 | * Enable pixel doubling when the dot clock | 6666 | * Enable double wide mode when the dot clock |
| 6591 | * is > 90% of the (display) core speed. | 6667 | * is > 90% of the (display) core speed. |
| 6592 | * | ||
| 6593 | * GDG double wide on either pipe, | ||
| 6594 | * otherwise pipe A only. | ||
| 6595 | */ | 6668 | */ |
| 6596 | if ((crtc->pipe == PIPE_A || IS_I915G(dev)) && | 6669 | if (intel_crtc_supports_double_wide(crtc) && |
| 6597 | adjusted_mode->crtc_clock > clock_limit * 9 / 10) { | 6670 | adjusted_mode->crtc_clock > clock_limit) { |
| 6598 | clock_limit *= 2; | 6671 | clock_limit *= 2; |
| 6599 | pipe_config->double_wide = true; | 6672 | pipe_config->double_wide = true; |
| 6600 | } | 6673 | } |
| 6601 | 6674 | ||
| 6602 | if (adjusted_mode->crtc_clock > clock_limit * 9 / 10) | 6675 | if (adjusted_mode->crtc_clock > clock_limit) { |
| 6676 | DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", | ||
| 6677 | adjusted_mode->crtc_clock, clock_limit, | ||
| 6678 | yesno(pipe_config->double_wide)); | ||
| 6603 | return -EINVAL; | 6679 | return -EINVAL; |
| 6680 | } | ||
| 6604 | } | 6681 | } |
| 6605 | 6682 | ||
| 6606 | /* | 6683 | /* |
| @@ -7365,7 +7442,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc, | |||
| 7365 | struct drm_device *dev = crtc->base.dev; | 7442 | struct drm_device *dev = crtc->base.dev; |
| 7366 | struct drm_i915_private *dev_priv = dev->dev_private; | 7443 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 7367 | int pipe = crtc->pipe; | 7444 | int pipe = crtc->pipe; |
| 7368 | int dpll_reg = DPLL(crtc->pipe); | 7445 | i915_reg_t dpll_reg = DPLL(crtc->pipe); |
| 7369 | enum dpio_channel port = vlv_pipe_to_channel(pipe); | 7446 | enum dpio_channel port = vlv_pipe_to_channel(pipe); |
| 7370 | u32 loopfilter, tribuf_calcntr; | 7447 | u32 loopfilter, tribuf_calcntr; |
| 7371 | u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; | 7448 | u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; |
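Several hunks in this diff change register variables from plain u32/int to i915_reg_t so that MMIO offsets can no longer be confused with ordinary integers or with register values. A minimal standalone sketch of why a one-member wrapper struct gives that type safety; demo_reg_t, DEMO_MMIO() and demo_read() are invented stand-ins, not the driver's real definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative wrapper; the real i915_reg_t and its constructor live
     * in the driver's register headers. */
    typedef struct { uint32_t reg; } demo_reg_t;
    #define DEMO_MMIO(off) ((demo_reg_t){ .reg = (off) })

    static uint32_t demo_read(demo_reg_t reg)
    {
        /* A real driver would ioread32() here; just echo the offset. */
        return reg.reg;
    }

    int main(void)
    {
        demo_reg_t dpll = DEMO_MMIO(0x6014);    /* hypothetical offset */

        printf("read 0x%08x\n", (unsigned int)demo_read(dpll));
        /* demo_read(0x6014); would no longer compile: int is not demo_reg_t. */
        return 0;
    }

The intel_ring_emit_reg() calls in the later DERRMR hunks follow the same idea of accepting only a register handle rather than a bare offset.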
| @@ -9283,8 +9360,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) | |||
| 9283 | 9360 | ||
| 9284 | I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); | 9361 | I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); |
| 9285 | I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); | 9362 | I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); |
| 9286 | I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); | 9363 | I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); |
| 9287 | I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); | 9364 | I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); |
| 9288 | I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); | 9365 | I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); |
| 9289 | I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, | 9366 | I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, |
| 9290 | "CPU PWM1 enabled\n"); | 9367 | "CPU PWM1 enabled\n"); |
| @@ -9746,7 +9823,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, | |||
| 9746 | 9823 | ||
| 9747 | port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; | 9824 | port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; |
| 9748 | 9825 | ||
| 9749 | if (IS_SKYLAKE(dev)) | 9826 | if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) |
| 9750 | skylake_get_ddi_pll(dev_priv, port, pipe_config); | 9827 | skylake_get_ddi_pll(dev_priv, port, pipe_config); |
| 9751 | else if (IS_BROXTON(dev)) | 9828 | else if (IS_BROXTON(dev)) |
| 9752 | bxt_get_ddi_pll(dev_priv, port, pipe_config); | 9829 | bxt_get_ddi_pll(dev_priv, port, pipe_config); |
| @@ -10092,20 +10169,17 @@ __intel_framebuffer_create(struct drm_device *dev, | |||
| 10092 | int ret; | 10169 | int ret; |
| 10093 | 10170 | ||
| 10094 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 10171 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
| 10095 | if (!intel_fb) { | 10172 | if (!intel_fb) |
| 10096 | drm_gem_object_unreference(&obj->base); | ||
| 10097 | return ERR_PTR(-ENOMEM); | 10173 | return ERR_PTR(-ENOMEM); |
| 10098 | } | ||
| 10099 | 10174 | ||
| 10100 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); | 10175 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); |
| 10101 | if (ret) | 10176 | if (ret) |
| 10102 | goto err; | 10177 | goto err; |
| 10103 | 10178 | ||
| 10104 | return &intel_fb->base; | 10179 | return &intel_fb->base; |
| 10180 | |||
| 10105 | err: | 10181 | err: |
| 10106 | drm_gem_object_unreference(&obj->base); | ||
| 10107 | kfree(intel_fb); | 10182 | kfree(intel_fb); |
| 10108 | |||
| 10109 | return ERR_PTR(ret); | 10183 | return ERR_PTR(ret); |
| 10110 | } | 10184 | } |
| 10111 | 10185 | ||
| @@ -10145,6 +10219,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, | |||
| 10145 | struct drm_display_mode *mode, | 10219 | struct drm_display_mode *mode, |
| 10146 | int depth, int bpp) | 10220 | int depth, int bpp) |
| 10147 | { | 10221 | { |
| 10222 | struct drm_framebuffer *fb; | ||
| 10148 | struct drm_i915_gem_object *obj; | 10223 | struct drm_i915_gem_object *obj; |
| 10149 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; | 10224 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; |
| 10150 | 10225 | ||
| @@ -10159,7 +10234,11 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, | |||
| 10159 | bpp); | 10234 | bpp); |
| 10160 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); | 10235 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); |
| 10161 | 10236 | ||
| 10162 | return intel_framebuffer_create(dev, &mode_cmd, obj); | 10237 | fb = intel_framebuffer_create(dev, &mode_cmd, obj); |
| 10238 | if (IS_ERR(fb)) | ||
| 10239 | drm_gem_object_unreference_unlocked(&obj->base); | ||
| 10240 | |||
| 10241 | return fb; | ||
| 10163 | } | 10242 | } |
| 10164 | 10243 | ||
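Both framebuffer-creation hunks above move GEM object cleanup out of the constructor and into its callers: the object reference is consumed only on success, and when an ERR_PTR comes back the caller now drops the reference itself. A reduced standalone sketch of that ownership convention with a mock refcount; every name here is invented for illustration:

    #include <stdio.h>

    struct demo_obj { int refcount; };

    static void demo_obj_put(struct demo_obj *obj)
    {
        if (--obj->refcount == 0)
            printf("object freed\n");
    }

    /*
     * Constructor that may fail.  On success it takes over the caller's
     * reference; on failure it leaves the reference untouched, so the
     * caller must drop it (the convention the hunks above switch to).
     */
    static struct demo_obj *demo_fb_create(struct demo_obj *obj, int simulate_err)
    {
        if (simulate_err)
            return NULL;        /* stand-in for an ERR_PTR() return */

        return obj;             /* the framebuffer now owns the reference */
    }

    int main(void)
    {
        struct demo_obj obj = { .refcount = 1 };    /* caller's reference */
        struct demo_obj *fb = demo_fb_create(&obj, 1);

        if (!fb)
            demo_obj_put(&obj); /* error path: caller cleans up */

        return 0;
    }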
| 10165 | static struct drm_framebuffer * | 10244 | static struct drm_framebuffer * |
| @@ -11062,7 +11141,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
| 11062 | */ | 11141 | */ |
| 11063 | if (ring->id == RCS) { | 11142 | if (ring->id == RCS) { |
| 11064 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); | 11143 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); |
| 11065 | intel_ring_emit(ring, DERRMR); | 11144 | intel_ring_emit_reg(ring, DERRMR); |
| 11066 | intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | | 11145 | intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | |
| 11067 | DERRMR_PIPEB_PRI_FLIP_DONE | | 11146 | DERRMR_PIPEB_PRI_FLIP_DONE | |
| 11068 | DERRMR_PIPEC_PRI_FLIP_DONE)); | 11147 | DERRMR_PIPEC_PRI_FLIP_DONE)); |
| @@ -11072,7 +11151,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
| 11072 | else | 11151 | else |
| 11073 | intel_ring_emit(ring, MI_STORE_REGISTER_MEM | | 11152 | intel_ring_emit(ring, MI_STORE_REGISTER_MEM | |
| 11074 | MI_SRM_LRM_GLOBAL_GTT); | 11153 | MI_SRM_LRM_GLOBAL_GTT); |
| 11075 | intel_ring_emit(ring, DERRMR); | 11154 | intel_ring_emit_reg(ring, DERRMR); |
| 11076 | intel_ring_emit(ring, ring->scratch.gtt_offset + 256); | 11155 | intel_ring_emit(ring, ring->scratch.gtt_offset + 256); |
| 11077 | if (IS_GEN8(dev)) { | 11156 | if (IS_GEN8(dev)) { |
| 11078 | intel_ring_emit(ring, 0); | 11157 | intel_ring_emit(ring, 0); |
| @@ -11117,13 +11196,14 @@ static bool use_mmio_flip(struct intel_engine_cs *ring, | |||
| 11117 | } | 11196 | } |
| 11118 | 11197 | ||
| 11119 | static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, | 11198 | static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, |
| 11199 | unsigned int rotation, | ||
| 11120 | struct intel_unpin_work *work) | 11200 | struct intel_unpin_work *work) |
| 11121 | { | 11201 | { |
| 11122 | struct drm_device *dev = intel_crtc->base.dev; | 11202 | struct drm_device *dev = intel_crtc->base.dev; |
| 11123 | struct drm_i915_private *dev_priv = dev->dev_private; | 11203 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 11124 | struct drm_framebuffer *fb = intel_crtc->base.primary->fb; | 11204 | struct drm_framebuffer *fb = intel_crtc->base.primary->fb; |
| 11125 | const enum pipe pipe = intel_crtc->pipe; | 11205 | const enum pipe pipe = intel_crtc->pipe; |
| 11126 | u32 ctl, stride; | 11206 | u32 ctl, stride, tile_height; |
| 11127 | 11207 | ||
| 11128 | ctl = I915_READ(PLANE_CTL(pipe, 0)); | 11208 | ctl = I915_READ(PLANE_CTL(pipe, 0)); |
| 11129 | ctl &= ~PLANE_CTL_TILED_MASK; | 11209 | ctl &= ~PLANE_CTL_TILED_MASK; |
| @@ -11147,9 +11227,16 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, | |||
| 11147 | 	 * The stride is either expressed as a multiple of 64-byte chunks for | 11227 | 	 * The stride is either expressed as a multiple of 64-byte chunks for |
| 11148 | 	 * linear buffers or as a number of tiles for tiled buffers. | 11228 | 	 * linear buffers or as a number of tiles for tiled buffers. |
| 11149 | */ | 11229 | */ |
| 11150 | stride = fb->pitches[0] / | 11230 | if (intel_rotation_90_or_270(rotation)) { |
| 11151 | intel_fb_stride_alignment(dev, fb->modifier[0], | 11231 | /* stride = Surface height in tiles */ |
| 11152 | fb->pixel_format); | 11232 | tile_height = intel_tile_height(dev, fb->pixel_format, |
| 11233 | fb->modifier[0], 0); | ||
| 11234 | stride = DIV_ROUND_UP(fb->height, tile_height); | ||
| 11235 | } else { | ||
| 11236 | stride = fb->pitches[0] / | ||
| 11237 | intel_fb_stride_alignment(dev, fb->modifier[0], | ||
| 11238 | fb->pixel_format); | ||
| 11239 | } | ||
| 11153 | 11240 | ||
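With 90/270 rotation the hardware expects PLANE_STRIDE in units of tiles along the rotated surface height rather than in bytes of pitch, hence the DIV_ROUND_UP(fb->height, tile_height) branch above. A small standalone illustration of the two calculations; the 128x32 Y-tile geometry and the 1920x1080 framebuffer are only example numbers, and the alignment value stands in for intel_fb_stride_alignment():

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Hypothetical 1920x1080 XRGB framebuffer, Y-tiled. */
        unsigned int fb_height    = 1080;
        unsigned int fb_pitch     = 7680;   /* bytes per row: 1920 * 4 */
        unsigned int tile_height  = 32;     /* rows per Y tile */
        unsigned int stride_align = 128;    /* bytes per Y-tile row */

        /* 0/180 degrees: stride counted in tile-row-width units. */
        unsigned int stride_normal = fb_pitch / stride_align;

        /* 90/270 degrees: stride is the surface height in tiles. */
        unsigned int stride_rotated = DIV_ROUND_UP(fb_height, tile_height);

        printf("normal stride %u, rotated stride %u\n",
               stride_normal, stride_rotated);
        return 0;
    }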
| 11154 | /* | 11241 | /* |
| 11155 | * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on | 11242 | * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on |
| @@ -11170,10 +11257,9 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, | |||
| 11170 | struct intel_framebuffer *intel_fb = | 11257 | struct intel_framebuffer *intel_fb = |
| 11171 | to_intel_framebuffer(intel_crtc->base.primary->fb); | 11258 | to_intel_framebuffer(intel_crtc->base.primary->fb); |
| 11172 | struct drm_i915_gem_object *obj = intel_fb->obj; | 11259 | struct drm_i915_gem_object *obj = intel_fb->obj; |
| 11260 | i915_reg_t reg = DSPCNTR(intel_crtc->plane); | ||
| 11173 | u32 dspcntr; | 11261 | u32 dspcntr; |
| 11174 | u32 reg; | ||
| 11175 | 11262 | ||
| 11176 | reg = DSPCNTR(intel_crtc->plane); | ||
| 11177 | dspcntr = I915_READ(reg); | 11263 | dspcntr = I915_READ(reg); |
| 11178 | 11264 | ||
| 11179 | if (obj->tiling_mode != I915_TILING_NONE) | 11265 | if (obj->tiling_mode != I915_TILING_NONE) |
| @@ -11207,7 +11293,7 @@ static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip) | |||
| 11207 | intel_pipe_update_start(crtc); | 11293 | intel_pipe_update_start(crtc); |
| 11208 | 11294 | ||
| 11209 | if (INTEL_INFO(mmio_flip->i915)->gen >= 9) | 11295 | if (INTEL_INFO(mmio_flip->i915)->gen >= 9) |
| 11210 | skl_do_mmio_flip(crtc, work); | 11296 | skl_do_mmio_flip(crtc, mmio_flip->rotation, work); |
| 11211 | else | 11297 | else |
| 11212 | /* use_mmio_flip() restricts MMIO flips to ilk+ */ | 11298 | /* use_mmio_flip() restricts MMIO flips to ilk+ */ |
| 11213 | ilk_do_mmio_flip(crtc, work); | 11299 | ilk_do_mmio_flip(crtc, work); |
| @@ -11234,10 +11320,7 @@ static void intel_mmio_flip_work_func(struct work_struct *work) | |||
| 11234 | 11320 | ||
| 11235 | static int intel_queue_mmio_flip(struct drm_device *dev, | 11321 | static int intel_queue_mmio_flip(struct drm_device *dev, |
| 11236 | struct drm_crtc *crtc, | 11322 | struct drm_crtc *crtc, |
| 11237 | struct drm_framebuffer *fb, | 11323 | struct drm_i915_gem_object *obj) |
| 11238 | struct drm_i915_gem_object *obj, | ||
| 11239 | struct intel_engine_cs *ring, | ||
| 11240 | uint32_t flags) | ||
| 11241 | { | 11324 | { |
| 11242 | struct intel_mmio_flip *mmio_flip; | 11325 | struct intel_mmio_flip *mmio_flip; |
| 11243 | 11326 | ||
| @@ -11248,6 +11331,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev, | |||
| 11248 | mmio_flip->i915 = to_i915(dev); | 11331 | mmio_flip->i915 = to_i915(dev); |
| 11249 | mmio_flip->req = i915_gem_request_reference(obj->last_write_req); | 11332 | mmio_flip->req = i915_gem_request_reference(obj->last_write_req); |
| 11250 | mmio_flip->crtc = to_intel_crtc(crtc); | 11333 | mmio_flip->crtc = to_intel_crtc(crtc); |
| 11334 | mmio_flip->rotation = crtc->primary->state->rotation; | ||
| 11251 | 11335 | ||
| 11252 | INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); | 11336 | INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); |
| 11253 | schedule_work(&mmio_flip->work); | 11337 | schedule_work(&mmio_flip->work); |
| @@ -11453,9 +11537,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 11453 | * synchronisation, so all we want here is to pin the framebuffer | 11537 | * synchronisation, so all we want here is to pin the framebuffer |
| 11454 | * into the display plane and skip any waits. | 11538 | * into the display plane and skip any waits. |
| 11455 | */ | 11539 | */ |
| 11540 | if (!mmio_flip) { | ||
| 11541 | ret = i915_gem_object_sync(obj, ring, &request); | ||
| 11542 | if (ret) | ||
| 11543 | goto cleanup_pending; | ||
| 11544 | } | ||
| 11545 | |||
| 11456 | ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, | 11546 | ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, |
| 11457 | crtc->primary->state, | 11547 | crtc->primary->state); |
| 11458 | mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request); | ||
| 11459 | if (ret) | 11548 | if (ret) |
| 11460 | goto cleanup_pending; | 11549 | goto cleanup_pending; |
| 11461 | 11550 | ||
| @@ -11464,8 +11553,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 11464 | work->gtt_offset += intel_crtc->dspaddr_offset; | 11553 | work->gtt_offset += intel_crtc->dspaddr_offset; |
| 11465 | 11554 | ||
| 11466 | if (mmio_flip) { | 11555 | if (mmio_flip) { |
| 11467 | ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, | 11556 | ret = intel_queue_mmio_flip(dev, crtc, obj); |
| 11468 | page_flip_flags); | ||
| 11469 | if (ret) | 11557 | if (ret) |
| 11470 | goto cleanup_unpin; | 11558 | goto cleanup_unpin; |
| 11471 | 11559 | ||
| @@ -11579,18 +11667,32 @@ retry: | |||
| 11579 | static bool intel_wm_need_update(struct drm_plane *plane, | 11667 | static bool intel_wm_need_update(struct drm_plane *plane, |
| 11580 | struct drm_plane_state *state) | 11668 | struct drm_plane_state *state) |
| 11581 | { | 11669 | { |
| 11582 | /* Update watermarks on tiling changes. */ | 11670 | struct intel_plane_state *new = to_intel_plane_state(state); |
| 11671 | struct intel_plane_state *cur = to_intel_plane_state(plane->state); | ||
| 11672 | |||
| 11673 | /* Update watermarks on tiling or size changes. */ | ||
| 11583 | if (!plane->state->fb || !state->fb || | 11674 | if (!plane->state->fb || !state->fb || |
| 11584 | plane->state->fb->modifier[0] != state->fb->modifier[0] || | 11675 | plane->state->fb->modifier[0] != state->fb->modifier[0] || |
| 11585 | plane->state->rotation != state->rotation) | 11676 | plane->state->rotation != state->rotation || |
| 11586 | return true; | 11677 | drm_rect_width(&new->src) != drm_rect_width(&cur->src) || |
| 11587 | 11678 | drm_rect_height(&new->src) != drm_rect_height(&cur->src) || | |
| 11588 | if (plane->state->crtc_w != state->crtc_w) | 11679 | drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) || |
| 11680 | drm_rect_height(&new->dst) != drm_rect_height(&cur->dst)) | ||
| 11589 | return true; | 11681 | return true; |
| 11590 | 11682 | ||
| 11591 | return false; | 11683 | return false; |
| 11592 | } | 11684 | } |
| 11593 | 11685 | ||
| 11686 | static bool needs_scaling(struct intel_plane_state *state) | ||
| 11687 | { | ||
| 11688 | int src_w = drm_rect_width(&state->src) >> 16; | ||
| 11689 | int src_h = drm_rect_height(&state->src) >> 16; | ||
| 11690 | int dst_w = drm_rect_width(&state->dst); | ||
| 11691 | int dst_h = drm_rect_height(&state->dst); | ||
| 11692 | |||
| 11693 | return (src_w != dst_w || src_h != dst_h); | ||
| 11694 | } | ||
| 11695 | |||
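needs_scaling() compares the source rectangle, which drm stores in 16.16 fixed point, against the destination rectangle in whole pixels, so the source sizes are shifted right by 16 before the comparison. A reduced standalone version of that check; plain integers replace drm_rect and its helpers, and needs_scaling_demo is an invented name:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the drm_rect based helper above:
     * src_* are 16.16 fixed point, dst_* are integer pixels. */
    static bool needs_scaling_demo(unsigned int src_w_fp, unsigned int src_h_fp,
                                   unsigned int dst_w, unsigned int dst_h)
    {
        unsigned int src_w = src_w_fp >> 16;
        unsigned int src_h = src_h_fp >> 16;

        return src_w != dst_w || src_h != dst_h;
    }

    int main(void)
    {
        /* 1920x1080 source scaled down to 1280x720: scaling needed. */
        printf("%d\n", needs_scaling_demo(1920 << 16, 1080 << 16, 1280, 720));
        /* Same size: no scaling. */
        printf("%d\n", needs_scaling_demo(1920 << 16, 1080 << 16, 1920, 1080));
        return 0;
    }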
| 11594 | int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | 11696 | int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, |
| 11595 | struct drm_plane_state *plane_state) | 11697 | struct drm_plane_state *plane_state) |
| 11596 | { | 11698 | { |
| @@ -11606,7 +11708,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | |||
| 11606 | bool mode_changed = needs_modeset(crtc_state); | 11708 | bool mode_changed = needs_modeset(crtc_state); |
| 11607 | bool was_crtc_enabled = crtc->state->active; | 11709 | bool was_crtc_enabled = crtc->state->active; |
| 11608 | bool is_crtc_enabled = crtc_state->active; | 11710 | bool is_crtc_enabled = crtc_state->active; |
| 11609 | |||
| 11610 | bool turn_off, turn_on, visible, was_visible; | 11711 | bool turn_off, turn_on, visible, was_visible; |
| 11611 | struct drm_framebuffer *fb = plane_state->fb; | 11712 | struct drm_framebuffer *fb = plane_state->fb; |
| 11612 | 11713 | ||
| @@ -11619,14 +11720,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | |||
| 11619 | return ret; | 11720 | return ret; |
| 11620 | } | 11721 | } |
| 11621 | 11722 | ||
| 11622 | /* | ||
| 11623 | * Disabling a plane is always okay; we just need to update | ||
| 11624 | * fb tracking in a special way since cleanup_fb() won't | ||
| 11625 | * get called by the plane helpers. | ||
| 11626 | */ | ||
| 11627 | if (old_plane_state->base.fb && !fb) | ||
| 11628 | intel_crtc->atomic.disabled_planes |= 1 << i; | ||
| 11629 | |||
| 11630 | was_visible = old_plane_state->visible; | 11723 | was_visible = old_plane_state->visible; |
| 11631 | visible = to_intel_plane_state(plane_state)->visible; | 11724 | visible = to_intel_plane_state(plane_state)->visible; |
| 11632 | 11725 | ||
| @@ -11676,7 +11769,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | |||
| 11676 | 11769 | ||
| 11677 | switch (plane->type) { | 11770 | switch (plane->type) { |
| 11678 | case DRM_PLANE_TYPE_PRIMARY: | 11771 | case DRM_PLANE_TYPE_PRIMARY: |
| 11679 | intel_crtc->atomic.wait_for_flips = true; | ||
| 11680 | intel_crtc->atomic.pre_disable_primary = turn_off; | 11772 | intel_crtc->atomic.pre_disable_primary = turn_off; |
| 11681 | intel_crtc->atomic.post_enable_primary = turn_on; | 11773 | intel_crtc->atomic.post_enable_primary = turn_on; |
| 11682 | 11774 | ||
| @@ -11724,11 +11816,23 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | |||
| 11724 | case DRM_PLANE_TYPE_CURSOR: | 11816 | case DRM_PLANE_TYPE_CURSOR: |
| 11725 | break; | 11817 | break; |
| 11726 | case DRM_PLANE_TYPE_OVERLAY: | 11818 | case DRM_PLANE_TYPE_OVERLAY: |
| 11727 | if (turn_off && !mode_changed) { | 11819 | /* |
| 11820 | * WaCxSRDisabledForSpriteScaling:ivb | ||
| 11821 | * | ||
| 11822 | * cstate->update_wm was already set above, so this flag will | ||
| 11823 | * take effect when we commit and program watermarks. | ||
| 11824 | */ | ||
| 11825 | if (IS_IVYBRIDGE(dev) && | ||
| 11826 | needs_scaling(to_intel_plane_state(plane_state)) && | ||
| 11827 | !needs_scaling(old_plane_state)) { | ||
| 11828 | to_intel_crtc_state(crtc_state)->disable_lp_wm = true; | ||
| 11829 | } else if (turn_off && !mode_changed) { | ||
| 11728 | intel_crtc->atomic.wait_vblank = true; | 11830 | intel_crtc->atomic.wait_vblank = true; |
| 11729 | intel_crtc->atomic.update_sprite_watermarks |= | 11831 | intel_crtc->atomic.update_sprite_watermarks |= |
| 11730 | 1 << i; | 11832 | 1 << i; |
| 11731 | } | 11833 | } |
| 11834 | |||
| 11835 | break; | ||
| 11732 | } | 11836 | } |
| 11733 | return 0; | 11837 | return 0; |
| 11734 | } | 11838 | } |
| @@ -11813,6 +11917,12 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, | |||
| 11813 | } | 11917 | } |
| 11814 | 11918 | ||
| 11815 | ret = 0; | 11919 | ret = 0; |
| 11920 | if (dev_priv->display.compute_pipe_wm) { | ||
| 11921 | ret = dev_priv->display.compute_pipe_wm(intel_crtc, state); | ||
| 11922 | if (ret) | ||
| 11923 | return ret; | ||
| 11924 | } | ||
| 11925 | |||
| 11816 | if (INTEL_INFO(dev)->gen >= 9) { | 11926 | if (INTEL_INFO(dev)->gen >= 9) { |
| 11817 | if (mode_changed) | 11927 | if (mode_changed) |
| 11818 | ret = skl_update_scaler_crtc(pipe_config); | 11928 | ret = skl_update_scaler_crtc(pipe_config); |
| @@ -12002,7 +12112,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, | |||
| 12002 | pipe_config->dpll_hw_state.pll9, | 12112 | pipe_config->dpll_hw_state.pll9, |
| 12003 | pipe_config->dpll_hw_state.pll10, | 12113 | pipe_config->dpll_hw_state.pll10, |
| 12004 | pipe_config->dpll_hw_state.pcsdw12); | 12114 | pipe_config->dpll_hw_state.pcsdw12); |
| 12005 | } else if (IS_SKYLAKE(dev)) { | 12115 | } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { |
| 12006 | DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: " | 12116 | DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: " |
| 12007 | "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", | 12117 | "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", |
| 12008 | pipe_config->ddi_pll_sel, | 12118 | pipe_config->ddi_pll_sel, |
| @@ -12256,6 +12366,18 @@ intel_modeset_update_crtc_state(struct drm_atomic_state *state) | |||
| 12256 | crtc->hwmode = crtc->state->adjusted_mode; | 12366 | crtc->hwmode = crtc->state->adjusted_mode; |
| 12257 | else | 12367 | else |
| 12258 | crtc->hwmode.crtc_clock = 0; | 12368 | crtc->hwmode.crtc_clock = 0; |
| 12369 | |||
| 12370 | /* | ||
| 12371 | * Update legacy state to satisfy fbc code. This can | ||
| 12372 | * be removed when fbc uses the atomic state. | ||
| 12373 | */ | ||
| 12374 | if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { | ||
| 12375 | struct drm_plane_state *plane_state = crtc->primary->state; | ||
| 12376 | |||
| 12377 | crtc->primary->fb = plane_state->fb; | ||
| 12378 | crtc->x = plane_state->src_x >> 16; | ||
| 12379 | crtc->y = plane_state->src_y >> 16; | ||
| 12380 | } | ||
| 12259 | } | 12381 | } |
| 12260 | } | 12382 | } |
| 12261 | 12383 | ||
| @@ -13020,6 +13142,45 @@ static int intel_modeset_checks(struct drm_atomic_state *state) | |||
| 13020 | return 0; | 13142 | return 0; |
| 13021 | } | 13143 | } |
| 13022 | 13144 | ||
| 13145 | /* | ||
| 13146 | * Handle calculation of various watermark data at the end of the atomic check | ||
| 13147 | * phase. The code here should be run after the per-crtc and per-plane 'check' | ||
| 13148 | * handlers to ensure that all derived state has been updated. | ||
| 13149 | */ | ||
| 13150 | static void calc_watermark_data(struct drm_atomic_state *state) | ||
| 13151 | { | ||
| 13152 | struct drm_device *dev = state->dev; | ||
| 13153 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | ||
| 13154 | struct drm_crtc *crtc; | ||
| 13155 | struct drm_crtc_state *cstate; | ||
| 13156 | struct drm_plane *plane; | ||
| 13157 | struct drm_plane_state *pstate; | ||
| 13158 | |||
| 13159 | /* | ||
| 13160 | * Calculate watermark configuration details now that derived | ||
| 13161 | * plane/crtc state is all properly updated. | ||
| 13162 | */ | ||
| 13163 | drm_for_each_crtc(crtc, dev) { | ||
| 13164 | cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?: | ||
| 13165 | crtc->state; | ||
| 13166 | |||
| 13167 | if (cstate->active) | ||
| 13168 | intel_state->wm_config.num_pipes_active++; | ||
| 13169 | } | ||
| 13170 | drm_for_each_legacy_plane(plane, dev) { | ||
| 13171 | pstate = drm_atomic_get_existing_plane_state(state, plane) ?: | ||
| 13172 | plane->state; | ||
| 13173 | |||
| 13174 | if (!to_intel_plane_state(pstate)->visible) | ||
| 13175 | continue; | ||
| 13176 | |||
| 13177 | intel_state->wm_config.sprites_enabled = true; | ||
| 13178 | if (pstate->crtc_w != pstate->src_w >> 16 || | ||
| 13179 | pstate->crtc_h != pstate->src_h >> 16) | ||
| 13180 | intel_state->wm_config.sprites_scaled = true; | ||
| 13181 | } | ||
| 13182 | } | ||
| 13183 | |||
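calc_watermark_data() prefers the state captured in this atomic transaction (drm_atomic_get_existing_crtc_state()/..._plane_state()) and falls back to the object's current state via the ?: shorthand when the transaction did not touch that crtc or plane, then aggregates the pipe/sprite configuration the watermark code needs. A reduced standalone sketch of the aggregation pass over mock plane states; the struct, the field names and the num_active_planes counter are invented for illustration, but the scaled test uses the same crtc_w versus src_w >> 16 comparison as the hunk above:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_plane_state {
        bool visible;
        unsigned int crtc_w, crtc_h;    /* destination, pixels */
        unsigned int src_w, src_h;      /* source, 16.16 fixed point */
    };

    struct demo_wm_config {
        unsigned int num_active_planes;
        bool sprites_scaled;
    };

    static void demo_calc_wm(const struct demo_plane_state *states, int n,
                             struct demo_wm_config *cfg)
    {
        for (int i = 0; i < n; i++) {
            const struct demo_plane_state *s = &states[i];

            if (!s->visible)
                continue;

            cfg->num_active_planes++;
            if (s->crtc_w != s->src_w >> 16 || s->crtc_h != s->src_h >> 16)
                cfg->sprites_scaled = true;
        }
    }

    int main(void)
    {
        struct demo_plane_state planes[] = {
            { true, 1920, 1080, 1920 << 16, 1080 << 16 },   /* 1:1 */
            { true, 1280, 720, 1920 << 16, 1080 << 16 },    /* downscaled */
            { false, 0, 0, 0, 0 },                          /* disabled */
        };
        struct demo_wm_config cfg = { 0 };

        demo_calc_wm(planes, 3, &cfg);
        printf("active=%u scaled=%d\n", cfg.num_active_planes, cfg.sprites_scaled);
        return 0;
    }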
| 13023 | /** | 13184 | /** |
| 13024 | * intel_atomic_check - validate state object | 13185 | * intel_atomic_check - validate state object |
| 13025 | * @dev: drm device | 13186 | * @dev: drm device |
| @@ -13028,6 +13189,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state) | |||
| 13028 | static int intel_atomic_check(struct drm_device *dev, | 13189 | static int intel_atomic_check(struct drm_device *dev, |
| 13029 | struct drm_atomic_state *state) | 13190 | struct drm_atomic_state *state) |
| 13030 | { | 13191 | { |
| 13192 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | ||
| 13031 | struct drm_crtc *crtc; | 13193 | struct drm_crtc *crtc; |
| 13032 | struct drm_crtc_state *crtc_state; | 13194 | struct drm_crtc_state *crtc_state; |
| 13033 | int ret, i; | 13195 | int ret, i; |
| @@ -13095,10 +13257,81 @@ static int intel_atomic_check(struct drm_device *dev, | |||
| 13095 | if (ret) | 13257 | if (ret) |
| 13096 | return ret; | 13258 | return ret; |
| 13097 | } else | 13259 | } else |
| 13098 | to_intel_atomic_state(state)->cdclk = | 13260 | intel_state->cdclk = to_i915(state->dev)->cdclk_freq; |
| 13099 | to_i915(state->dev)->cdclk_freq; | 13261 | |
| 13262 | ret = drm_atomic_helper_check_planes(state->dev, state); | ||
| 13263 | if (ret) | ||
| 13264 | return ret; | ||
| 13265 | |||
| 13266 | calc_watermark_data(state); | ||
| 13267 | |||
| 13268 | return 0; | ||
| 13269 | } | ||
| 13270 | |||
| 13271 | static int intel_atomic_prepare_commit(struct drm_device *dev, | ||
| 13272 | struct drm_atomic_state *state, | ||
| 13273 | bool async) | ||
| 13274 | { | ||
| 13275 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 13276 | struct drm_plane_state *plane_state; | ||
| 13277 | struct drm_crtc_state *crtc_state; | ||
| 13278 | struct drm_plane *plane; | ||
| 13279 | struct drm_crtc *crtc; | ||
| 13280 | int i, ret; | ||
| 13281 | |||
| 13282 | if (async) { | ||
| 13283 | DRM_DEBUG_KMS("i915 does not yet support async commit\n"); | ||
| 13284 | return -EINVAL; | ||
| 13285 | } | ||
| 13100 | 13286 | ||
| 13101 | return drm_atomic_helper_check_planes(state->dev, state); | 13287 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 13288 | ret = intel_crtc_wait_for_pending_flips(crtc); | ||
| 13289 | if (ret) | ||
| 13290 | return ret; | ||
| 13291 | |||
| 13292 | if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2) | ||
| 13293 | flush_workqueue(dev_priv->wq); | ||
| 13294 | } | ||
| 13295 | |||
| 13296 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
| 13297 | if (ret) | ||
| 13298 | return ret; | ||
| 13299 | |||
| 13300 | ret = drm_atomic_helper_prepare_planes(dev, state); | ||
| 13301 | if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) { | ||
| 13302 | u32 reset_counter; | ||
| 13303 | |||
| 13304 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | ||
| 13305 | mutex_unlock(&dev->struct_mutex); | ||
| 13306 | |||
| 13307 | for_each_plane_in_state(state, plane, plane_state, i) { | ||
| 13308 | struct intel_plane_state *intel_plane_state = | ||
| 13309 | to_intel_plane_state(plane_state); | ||
| 13310 | |||
| 13311 | if (!intel_plane_state->wait_req) | ||
| 13312 | continue; | ||
| 13313 | |||
| 13314 | ret = __i915_wait_request(intel_plane_state->wait_req, | ||
| 13315 | reset_counter, true, | ||
| 13316 | NULL, NULL); | ||
| 13317 | |||
| 13318 | /* Swallow -EIO errors to allow updates during hw lockup. */ | ||
| 13319 | if (ret == -EIO) | ||
| 13320 | ret = 0; | ||
| 13321 | |||
| 13322 | if (ret) | ||
| 13323 | break; | ||
| 13324 | } | ||
| 13325 | |||
| 13326 | if (!ret) | ||
| 13327 | return 0; | ||
| 13328 | |||
| 13329 | mutex_lock(&dev->struct_mutex); | ||
| 13330 | drm_atomic_helper_cleanup_planes(dev, state); | ||
| 13331 | } | ||
| 13332 | |||
| 13333 | mutex_unlock(&dev->struct_mutex); | ||
| 13334 | return ret; | ||
| 13102 | } | 13335 | } |
| 13103 | 13336 | ||
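intel_atomic_prepare_commit() waits on each plane's outstanding render request outside struct_mutex and deliberately converts -EIO into success, so a wedged GPU cannot block further display updates. A tiny standalone illustration of that error-swallowing loop; demo_wait() is an invented stand-in for __i915_wait_request():

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for __i915_wait_request(): pretend one wait hits a hung GPU. */
    static int demo_wait(int plane)
    {
        return plane == 1 ? -EIO : 0;
    }

    int main(void)
    {
        int ret = 0;

        for (int plane = 0; plane < 3; plane++) {
            ret = demo_wait(plane);

            /* Swallow -EIO so a hung GPU cannot block display updates. */
            if (ret == -EIO)
                ret = 0;

            if (ret)
                break;
        }

        printf("ret=%d\n", ret);
        return 0;
    }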
| 13104 | /** | 13337 | /** |
| @@ -13122,22 +13355,20 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
| 13122 | bool async) | 13355 | bool async) |
| 13123 | { | 13356 | { |
| 13124 | struct drm_i915_private *dev_priv = dev->dev_private; | 13357 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 13125 | struct drm_crtc *crtc; | ||
| 13126 | struct drm_crtc_state *crtc_state; | 13358 | struct drm_crtc_state *crtc_state; |
| 13359 | struct drm_crtc *crtc; | ||
| 13127 | int ret = 0; | 13360 | int ret = 0; |
| 13128 | int i; | 13361 | int i; |
| 13129 | bool any_ms = false; | 13362 | bool any_ms = false; |
| 13130 | 13363 | ||
| 13131 | if (async) { | 13364 | ret = intel_atomic_prepare_commit(dev, state, async); |
| 13132 | DRM_DEBUG_KMS("i915 does not yet support async commit\n"); | 13365 | if (ret) { |
| 13133 | return -EINVAL; | 13366 | DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); |
| 13134 | } | ||
| 13135 | |||
| 13136 | ret = drm_atomic_helper_prepare_planes(dev, state); | ||
| 13137 | if (ret) | ||
| 13138 | return ret; | 13367 | return ret; |
| 13368 | } | ||
| 13139 | 13369 | ||
| 13140 | drm_atomic_helper_swap_state(dev, state); | 13370 | drm_atomic_helper_swap_state(dev, state); |
| 13371 | dev_priv->wm.config = to_intel_atomic_state(state)->wm_config; | ||
| 13141 | 13372 | ||
| 13142 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 13373 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 13143 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 13374 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| @@ -13175,6 +13406,9 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
| 13175 | to_intel_crtc_state(crtc->state)->update_pipe; | 13406 | to_intel_crtc_state(crtc->state)->update_pipe; |
| 13176 | unsigned long put_domains = 0; | 13407 | unsigned long put_domains = 0; |
| 13177 | 13408 | ||
| 13409 | if (modeset) | ||
| 13410 | intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); | ||
| 13411 | |||
| 13178 | if (modeset && crtc->state->active) { | 13412 | if (modeset && crtc->state->active) { |
| 13179 | update_scanline_offset(to_intel_crtc(crtc)); | 13413 | update_scanline_offset(to_intel_crtc(crtc)); |
| 13180 | dev_priv->display.crtc_enable(crtc); | 13414 | dev_priv->display.crtc_enable(crtc); |
| @@ -13190,18 +13424,26 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
| 13190 | if (!modeset) | 13424 | if (!modeset) |
| 13191 | intel_pre_plane_update(intel_crtc); | 13425 | intel_pre_plane_update(intel_crtc); |
| 13192 | 13426 | ||
| 13193 | drm_atomic_helper_commit_planes_on_crtc(crtc_state); | 13427 | if (crtc->state->active && |
| 13428 | (crtc->state->planes_changed || update_pipe)) | ||
| 13429 | drm_atomic_helper_commit_planes_on_crtc(crtc_state); | ||
| 13194 | 13430 | ||
| 13195 | if (put_domains) | 13431 | if (put_domains) |
| 13196 | modeset_put_power_domains(dev_priv, put_domains); | 13432 | modeset_put_power_domains(dev_priv, put_domains); |
| 13197 | 13433 | ||
| 13198 | intel_post_plane_update(intel_crtc); | 13434 | intel_post_plane_update(intel_crtc); |
| 13435 | |||
| 13436 | if (modeset) | ||
| 13437 | intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); | ||
| 13199 | } | 13438 | } |
| 13200 | 13439 | ||
| 13201 | /* FIXME: add subpixel order */ | 13440 | /* FIXME: add subpixel order */ |
| 13202 | 13441 | ||
| 13203 | drm_atomic_helper_wait_for_vblanks(dev, state); | 13442 | drm_atomic_helper_wait_for_vblanks(dev, state); |
| 13443 | |||
| 13444 | mutex_lock(&dev->struct_mutex); | ||
| 13204 | drm_atomic_helper_cleanup_planes(dev, state); | 13445 | drm_atomic_helper_cleanup_planes(dev, state); |
| 13446 | mutex_unlock(&dev->struct_mutex); | ||
| 13205 | 13447 | ||
| 13206 | if (any_ms) | 13448 | if (any_ms) |
| 13207 | intel_modeset_check_state(dev, state); | 13449 | intel_modeset_check_state(dev, state); |
| @@ -13370,6 +13612,8 @@ static void intel_shared_dpll_init(struct drm_device *dev) | |||
| 13370 | * bits. Some older platforms need special physical address handling for | 13612 | * bits. Some older platforms need special physical address handling for |
| 13371 | * cursor planes. | 13613 | * cursor planes. |
| 13372 | * | 13614 | * |
| 13615 | * Must be called with struct_mutex held. | ||
| 13616 | * | ||
| 13373 | * Returns 0 on success, negative error code on failure. | 13617 | * Returns 0 on success, negative error code on failure. |
| 13374 | */ | 13618 | */ |
| 13375 | int | 13619 | int |
| @@ -13380,28 +13624,58 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
| 13380 | struct drm_framebuffer *fb = new_state->fb; | 13624 | struct drm_framebuffer *fb = new_state->fb; |
| 13381 | struct intel_plane *intel_plane = to_intel_plane(plane); | 13625 | struct intel_plane *intel_plane = to_intel_plane(plane); |
| 13382 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 13626 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
| 13383 | struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); | 13627 | struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); |
| 13384 | int ret = 0; | 13628 | int ret = 0; |
| 13385 | 13629 | ||
| 13386 | if (!obj) | 13630 | if (!obj && !old_obj) |
| 13387 | return 0; | 13631 | return 0; |
| 13388 | 13632 | ||
| 13389 | mutex_lock(&dev->struct_mutex); | 13633 | if (old_obj) { |
| 13634 | struct drm_crtc_state *crtc_state = | ||
| 13635 | drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc); | ||
| 13636 | |||
| 13637 | /* Big Hammer, we also need to ensure that any pending | ||
| 13638 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | ||
| 13639 | * current scanout is retired before unpinning the old | ||
| 13640 | * framebuffer. Note that we rely on userspace rendering | ||
| 13641 | * into the buffer attached to the pipe they are waiting | ||
| 13642 | * on. If not, userspace generates a GPU hang with IPEHR | ||
| 13643 | * point to the MI_WAIT_FOR_EVENT. | ||
| 13644 | * | ||
| 13645 | * This should only fail upon a hung GPU, in which case we | ||
| 13646 | * can safely continue. | ||
| 13647 | */ | ||
| 13648 | if (needs_modeset(crtc_state)) | ||
| 13649 | ret = i915_gem_object_wait_rendering(old_obj, true); | ||
| 13390 | 13650 | ||
| 13391 | if (plane->type == DRM_PLANE_TYPE_CURSOR && | 13651 | /* Swallow -EIO errors to allow updates during hw lockup. */ |
| 13652 | if (ret && ret != -EIO) | ||
| 13653 | return ret; | ||
| 13654 | } | ||
| 13655 | |||
| 13656 | if (!obj) { | ||
| 13657 | ret = 0; | ||
| 13658 | } else if (plane->type == DRM_PLANE_TYPE_CURSOR && | ||
| 13392 | INTEL_INFO(dev)->cursor_needs_physical) { | 13659 | INTEL_INFO(dev)->cursor_needs_physical) { |
| 13393 | int align = IS_I830(dev) ? 16 * 1024 : 256; | 13660 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
| 13394 | ret = i915_gem_object_attach_phys(obj, align); | 13661 | ret = i915_gem_object_attach_phys(obj, align); |
| 13395 | if (ret) | 13662 | if (ret) |
| 13396 | DRM_DEBUG_KMS("failed to attach phys object\n"); | 13663 | DRM_DEBUG_KMS("failed to attach phys object\n"); |
| 13397 | } else { | 13664 | } else { |
| 13398 | ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL); | 13665 | ret = intel_pin_and_fence_fb_obj(plane, fb, new_state); |
| 13399 | } | 13666 | } |
| 13400 | 13667 | ||
| 13401 | if (ret == 0) | 13668 | if (ret == 0) { |
| 13402 | i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); | 13669 | if (obj) { |
| 13670 | struct intel_plane_state *plane_state = | ||
| 13671 | to_intel_plane_state(new_state); | ||
| 13403 | 13672 | ||
| 13404 | mutex_unlock(&dev->struct_mutex); | 13673 | i915_gem_request_assign(&plane_state->wait_req, |
| 13674 | obj->last_write_req); | ||
| 13675 | } | ||
| 13676 | |||
| 13677 | i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); | ||
| 13678 | } | ||
| 13405 | 13679 | ||
| 13406 | return ret; | 13680 | return ret; |
| 13407 | } | 13681 | } |
| @@ -13412,23 +13686,35 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
| 13412 | * @fb: old framebuffer that was on plane | 13686 | * @fb: old framebuffer that was on plane |
| 13413 | * | 13687 | * |
| 13414 | * Cleans up a framebuffer that has just been removed from a plane. | 13688 | * Cleans up a framebuffer that has just been removed from a plane. |
| 13689 | * | ||
| 13690 | * Must be called with struct_mutex held. | ||
| 13415 | */ | 13691 | */ |
| 13416 | void | 13692 | void |
| 13417 | intel_cleanup_plane_fb(struct drm_plane *plane, | 13693 | intel_cleanup_plane_fb(struct drm_plane *plane, |
| 13418 | const struct drm_plane_state *old_state) | 13694 | const struct drm_plane_state *old_state) |
| 13419 | { | 13695 | { |
| 13420 | struct drm_device *dev = plane->dev; | 13696 | struct drm_device *dev = plane->dev; |
| 13421 | struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb); | 13697 | struct intel_plane *intel_plane = to_intel_plane(plane); |
| 13698 | struct intel_plane_state *old_intel_state; | ||
| 13699 | struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); | ||
| 13700 | struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); | ||
| 13422 | 13701 | ||
| 13423 | if (!obj) | 13702 | old_intel_state = to_intel_plane_state(old_state); |
| 13703 | |||
| 13704 | if (!obj && !old_obj) | ||
| 13424 | return; | 13705 | return; |
| 13425 | 13706 | ||
| 13426 | if (plane->type != DRM_PLANE_TYPE_CURSOR || | 13707 | if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || |
| 13427 | !INTEL_INFO(dev)->cursor_needs_physical) { | 13708 | !INTEL_INFO(dev)->cursor_needs_physical)) |
| 13428 | mutex_lock(&dev->struct_mutex); | ||
| 13429 | intel_unpin_fb_obj(old_state->fb, old_state); | 13709 | intel_unpin_fb_obj(old_state->fb, old_state); |
| 13430 | mutex_unlock(&dev->struct_mutex); | 13710 | |
| 13431 | } | 13711 | /* prepare_fb aborted? */ |
| 13712 | if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) || | ||
| 13713 | (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit))) | ||
| 13714 | i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); | ||
| 13715 | |||
| 13716 | i915_gem_request_assign(&old_intel_state->wait_req, NULL); | ||
| 13717 | |||
| 13432 | } | 13718 | } |
| 13433 | 13719 | ||
| 13434 | int | 13720 | int |
| @@ -13447,7 +13733,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state | |||
| 13447 | crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; | 13733 | crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; |
| 13448 | cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; | 13734 | cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; |
| 13449 | 13735 | ||
| 13450 | if (!crtc_clock || !cdclk) | 13736 | if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock)) |
| 13451 | return DRM_PLANE_HELPER_NO_SCALING; | 13737 | return DRM_PLANE_HELPER_NO_SCALING; |
| 13452 | 13738 | ||
| 13453 | /* | 13739 | /* |
| @@ -13495,18 +13781,8 @@ intel_commit_primary_plane(struct drm_plane *plane, | |||
| 13495 | struct drm_framebuffer *fb = state->base.fb; | 13781 | struct drm_framebuffer *fb = state->base.fb; |
| 13496 | struct drm_device *dev = plane->dev; | 13782 | struct drm_device *dev = plane->dev; |
| 13497 | struct drm_i915_private *dev_priv = dev->dev_private; | 13783 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 13498 | struct intel_crtc *intel_crtc; | ||
| 13499 | struct drm_rect *src = &state->src; | ||
| 13500 | 13784 | ||
| 13501 | crtc = crtc ? crtc : plane->crtc; | 13785 | crtc = crtc ? crtc : plane->crtc; |
| 13502 | intel_crtc = to_intel_crtc(crtc); | ||
| 13503 | |||
| 13504 | plane->fb = fb; | ||
| 13505 | crtc->x = src->x1 >> 16; | ||
| 13506 | crtc->y = src->y1 >> 16; | ||
| 13507 | |||
| 13508 | if (!crtc->state->active) | ||
| 13509 | return; | ||
| 13510 | 13786 | ||
| 13511 | dev_priv->display.update_primary_plane(crtc, fb, | 13787 | dev_priv->display.update_primary_plane(crtc, fb, |
| 13512 | state->src.x1 >> 16, | 13788 | state->src.x1 >> 16, |
| @@ -13536,8 +13812,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, | |||
| 13536 | intel_update_watermarks(crtc); | 13812 | intel_update_watermarks(crtc); |
| 13537 | 13813 | ||
| 13538 | /* Perform vblank evasion around commit operation */ | 13814 | /* Perform vblank evasion around commit operation */ |
| 13539 | if (crtc->state->active) | 13815 | intel_pipe_update_start(intel_crtc); |
| 13540 | intel_pipe_update_start(intel_crtc); | ||
| 13541 | 13816 | ||
| 13542 | if (modeset) | 13817 | if (modeset) |
| 13543 | return; | 13818 | return; |
| @@ -13553,8 +13828,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, | |||
| 13553 | { | 13828 | { |
| 13554 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 13829 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 13555 | 13830 | ||
| 13556 | if (crtc->state->active) | 13831 | intel_pipe_update_end(intel_crtc); |
| 13557 | intel_pipe_update_end(intel_crtc); | ||
| 13558 | } | 13832 | } |
| 13559 | 13833 | ||
| 13560 | /** | 13834 | /** |
| @@ -13737,8 +14011,7 @@ intel_commit_cursor_plane(struct drm_plane *plane, | |||
| 13737 | intel_crtc->cursor_bo = obj; | 14011 | intel_crtc->cursor_bo = obj; |
| 13738 | 14012 | ||
| 13739 | update: | 14013 | update: |
| 13740 | if (crtc->state->active) | 14014 | intel_crtc_update_cursor(crtc, state->visible); |
| 13741 | intel_crtc_update_cursor(crtc, state->visible); | ||
| 13742 | } | 14015 | } |
| 13743 | 14016 | ||
| 13744 | static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, | 14017 | static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, |
| @@ -14010,7 +14283,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14010 | */ | 14283 | */ |
| 14011 | found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; | 14284 | found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; |
| 14012 | /* WaIgnoreDDIAStrap: skl */ | 14285 | /* WaIgnoreDDIAStrap: skl */ |
| 14013 | if (found || IS_SKYLAKE(dev)) | 14286 | if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) |
| 14014 | intel_ddi_init(dev, PORT_A); | 14287 | intel_ddi_init(dev, PORT_A); |
| 14015 | 14288 | ||
| 14016 | /* DDI B, C and D detection is indicated by the SFUSE_STRAP | 14289 | /* DDI B, C and D detection is indicated by the SFUSE_STRAP |
| @@ -14026,7 +14299,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14026 | /* | 14299 | /* |
| 14027 | * On SKL we don't have a way to detect DDI-E so we rely on VBT. | 14300 | * On SKL we don't have a way to detect DDI-E so we rely on VBT. |
| 14028 | */ | 14301 | */ |
| 14029 | if (IS_SKYLAKE(dev) && | 14302 | if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && |
| 14030 | (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || | 14303 | (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || |
| 14031 | dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || | 14304 | dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || |
| 14032 | dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) | 14305 | dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) |
| @@ -14041,7 +14314,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14041 | 14314 | ||
| 14042 | if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { | 14315 | if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { |
| 14043 | /* PCH SDVOB multiplex with HDMIB */ | 14316 | /* PCH SDVOB multiplex with HDMIB */ |
| 14044 | found = intel_sdvo_init(dev, PCH_SDVOB, true); | 14317 | found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B); |
| 14045 | if (!found) | 14318 | if (!found) |
| 14046 | intel_hdmi_init(dev, PCH_HDMIB, PORT_B); | 14319 | intel_hdmi_init(dev, PCH_HDMIB, PORT_B); |
| 14047 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) | 14320 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) |
| @@ -14097,7 +14370,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14097 | 14370 | ||
| 14098 | if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { | 14371 | if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { |
| 14099 | DRM_DEBUG_KMS("probing SDVOB\n"); | 14372 | DRM_DEBUG_KMS("probing SDVOB\n"); |
| 14100 | found = intel_sdvo_init(dev, GEN3_SDVOB, true); | 14373 | found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B); |
| 14101 | if (!found && IS_G4X(dev)) { | 14374 | if (!found && IS_G4X(dev)) { |
| 14102 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); | 14375 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); |
| 14103 | intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); | 14376 | intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); |
| @@ -14111,7 +14384,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14111 | 14384 | ||
| 14112 | if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { | 14385 | if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { |
| 14113 | DRM_DEBUG_KMS("probing SDVOC\n"); | 14386 | DRM_DEBUG_KMS("probing SDVOC\n"); |
| 14114 | found = intel_sdvo_init(dev, GEN3_SDVOC, false); | 14387 | found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C); |
| 14115 | } | 14388 | } |
| 14116 | 14389 | ||
| 14117 | if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { | 14390 | if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { |
| @@ -14379,6 +14652,7 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
| 14379 | struct drm_file *filp, | 14652 | struct drm_file *filp, |
| 14380 | const struct drm_mode_fb_cmd2 *user_mode_cmd) | 14653 | const struct drm_mode_fb_cmd2 *user_mode_cmd) |
| 14381 | { | 14654 | { |
| 14655 | struct drm_framebuffer *fb; | ||
| 14382 | struct drm_i915_gem_object *obj; | 14656 | struct drm_i915_gem_object *obj; |
| 14383 | struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; | 14657 | struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; |
| 14384 | 14658 | ||
| @@ -14387,7 +14661,11 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
| 14387 | if (&obj->base == NULL) | 14661 | if (&obj->base == NULL) |
| 14388 | return ERR_PTR(-ENOENT); | 14662 | return ERR_PTR(-ENOENT); |
| 14389 | 14663 | ||
| 14390 | return intel_framebuffer_create(dev, &mode_cmd, obj); | 14664 | fb = intel_framebuffer_create(dev, &mode_cmd, obj); |
| 14665 | if (IS_ERR(fb)) | ||
| 14666 | drm_gem_object_unreference_unlocked(&obj->base); | ||
| 14667 | |||
| 14668 | return fb; | ||
| 14391 | } | 14669 | } |
| 14392 | 14670 | ||
| 14393 | #ifndef CONFIG_DRM_FBDEV_EMULATION | 14671 | #ifndef CONFIG_DRM_FBDEV_EMULATION |
| @@ -14472,7 +14750,7 @@ static void intel_init_display(struct drm_device *dev) | |||
| 14472 | } | 14750 | } |
| 14473 | 14751 | ||
| 14474 | /* Returns the core display clock speed */ | 14752 | /* Returns the core display clock speed */ |
| 14475 | if (IS_SKYLAKE(dev)) | 14753 | if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) |
| 14476 | dev_priv->display.get_display_clock_speed = | 14754 | dev_priv->display.get_display_clock_speed = |
| 14477 | skylake_get_display_clock_speed; | 14755 | skylake_get_display_clock_speed; |
| 14478 | else if (IS_BROXTON(dev)) | 14756 | else if (IS_BROXTON(dev)) |
| @@ -14761,7 +15039,7 @@ static void i915_disable_vga(struct drm_device *dev) | |||
| 14761 | { | 15039 | { |
| 14762 | struct drm_i915_private *dev_priv = dev->dev_private; | 15040 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 14763 | u8 sr1; | 15041 | u8 sr1; |
| 14764 | u32 vga_reg = i915_vgacntrl_reg(dev); | 15042 | i915_reg_t vga_reg = i915_vgacntrl_reg(dev); |
| 14765 | 15043 | ||
| 14766 | /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ | 15044 | /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ |
| 14767 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); | 15045 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); |
| @@ -14877,9 +15155,6 @@ void intel_modeset_init(struct drm_device *dev) | |||
| 14877 | i915_disable_vga(dev); | 15155 | i915_disable_vga(dev); |
| 14878 | intel_setup_outputs(dev); | 15156 | intel_setup_outputs(dev); |
| 14879 | 15157 | ||
| 14880 | /* Just in case the BIOS is doing something questionable. */ | ||
| 14881 | intel_fbc_disable(dev_priv); | ||
| 14882 | |||
| 14883 | drm_modeset_lock_all(dev); | 15158 | drm_modeset_lock_all(dev); |
| 14884 | intel_modeset_setup_hw_state(dev); | 15159 | intel_modeset_setup_hw_state(dev); |
| 14885 | drm_modeset_unlock_all(dev); | 15160 | drm_modeset_unlock_all(dev); |
| @@ -14966,10 +15241,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
| 14966 | { | 15241 | { |
| 14967 | struct drm_device *dev = crtc->base.dev; | 15242 | struct drm_device *dev = crtc->base.dev; |
| 14968 | struct drm_i915_private *dev_priv = dev->dev_private; | 15243 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 14969 | u32 reg; | 15244 | i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder); |
| 14970 | 15245 | ||
| 14971 | /* Clear any frame start delays used for debugging left by the BIOS */ | 15246 | /* Clear any frame start delays used for debugging left by the BIOS */ |
| 14972 | reg = PIPECONF(crtc->config->cpu_transcoder); | ||
| 14973 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); | 15247 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); |
| 14974 | 15248 | ||
| 14975 | /* restore vblank interrupts to correct state */ | 15249 | /* restore vblank interrupts to correct state */ |
| @@ -15123,7 +15397,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) | |||
| 15123 | void i915_redisable_vga_power_on(struct drm_device *dev) | 15397 | void i915_redisable_vga_power_on(struct drm_device *dev) |
| 15124 | { | 15398 | { |
| 15125 | struct drm_i915_private *dev_priv = dev->dev_private; | 15399 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 15126 | u32 vga_reg = i915_vgacntrl_reg(dev); | 15400 | i915_reg_t vga_reg = i915_vgacntrl_reg(dev); |
| 15127 | 15401 | ||
| 15128 | if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { | 15402 | if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { |
| 15129 | DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); | 15403 | DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); |
| @@ -15162,7 +15436,7 @@ static void readout_plane_state(struct intel_crtc *crtc) | |||
| 15162 | struct intel_plane_state *plane_state = | 15436 | struct intel_plane_state *plane_state = |
| 15163 | to_intel_plane_state(primary->state); | 15437 | to_intel_plane_state(primary->state); |
| 15164 | 15438 | ||
| 15165 | plane_state->visible = | 15439 | plane_state->visible = crtc->active && |
| 15166 | primary_get_hw_state(to_intel_plane(primary)); | 15440 | primary_get_hw_state(to_intel_plane(primary)); |
| 15167 | 15441 | ||
| 15168 | if (plane_state->visible) | 15442 | if (plane_state->visible) |
| @@ -15419,8 +15693,7 @@ void intel_modeset_gem_init(struct drm_device *dev) | |||
| 15419 | mutex_lock(&dev->struct_mutex); | 15693 | mutex_lock(&dev->struct_mutex); |
| 15420 | ret = intel_pin_and_fence_fb_obj(c->primary, | 15694 | ret = intel_pin_and_fence_fb_obj(c->primary, |
| 15421 | c->primary->fb, | 15695 | c->primary->fb, |
| 15422 | c->primary->state, | 15696 | c->primary->state); |
| 15423 | NULL, NULL); | ||
| 15424 | mutex_unlock(&dev->struct_mutex); | 15697 | mutex_unlock(&dev->struct_mutex); |
| 15425 | if (ret) { | 15698 | if (ret) { |
| 15426 | DRM_ERROR("failed to pin boot fb on pipe %d\n", | 15699 | DRM_ERROR("failed to pin boot fb on pipe %d\n", |
