Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
 -rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 525 ++++++++++++++++---------
 1 file changed, 342 insertions(+), 183 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1db9b8328275..897a791662c5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
 	uint32_t method1, method2;
 	int cpp;
 
+	if (mem_value == 0)
+		return U32_MAX;
+
 	if (!intel_wm_plane_visible(cstate, pstate))
 		return 0;
 
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
 	uint32_t method1, method2;
 	int cpp;
 
+	if (mem_value == 0)
+		return U32_MAX;
+
 	if (!intel_wm_plane_visible(cstate, pstate))
 		return 0;
 
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 {
 	int cpp;
 
+	if (mem_value == 0)
+		return U32_MAX;
+
 	if (!intel_wm_plane_visible(cstate, pstate))
 		return 0;
 
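A zero latency value now disables the level outright: U32_MAX can never pass the per-level comparison against the hardware maximums, so ilk_validate_wm_level() leaves the level off instead of programming a bogus zero-latency watermark. A minimal standalone model of that contract, assuming the usual "computed value must fit the limit" validation (the 1024-block limit here is invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define U32_MAX ((uint32_t)~0U)

	/* Simplified model: a zero memory latency makes the level unusable. */
	static uint32_t compute_wm(uint32_t pixels, uint32_t mem_value)
	{
		if (mem_value == 0)
			return U32_MAX;	/* impossible requirement -> level stays disabled */
		return pixels / mem_value + 2;
	}

	/* Stand-in for the hardware's per-level maximum (illustrative value). */
	static int wm_level_valid(uint32_t wm, uint32_t hw_max)
	{
		return wm <= hw_max;
	}

	int main(void)
	{
		printf("latency 5: wm=%u valid=%d\n",
		       (unsigned)compute_wm(4096, 5),
		       wm_level_valid(compute_wm(4096, 5), 1024));
		printf("latency 0: valid=%d\n",
		       wm_level_valid(compute_wm(4096, 0), 1024));
		return 0;
	}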
@@ -2881,8 +2890,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 		 * any underrun. If not able to get Dimm info assume 16GB dimm
 		 * to avoid any underrun.
 		 */
-		if (!dev_priv->dram_info.valid_dimm ||
-		    dev_priv->dram_info.is_16gb_dimm)
+		if (dev_priv->dram_info.is_16gb_dimm)
 			wm[0] += 1;
 
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3009,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 }
 
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * On some SNB machines (Thinkpad X220 Tablet at least)
+	 * LP3 usage can cause vblank interrupts to be lost.
+	 * The DEIIR bit will go high but it looks like the CPU
+	 * never gets interrupted.
+	 *
+	 * It's not clear whether other interrupt sources could
+	 * be affected or if this is somehow limited to vblank
+	 * interrupts only. To play it safe we disable LP3
+	 * watermarks entirely.
+	 */
+	if (dev_priv->wm.pri_latency[3] == 0 &&
+	    dev_priv->wm.spr_latency[3] == 0 &&
+	    dev_priv->wm.cur_latency[3] == 0)
+		return;
+
+	dev_priv->wm.pri_latency[3] = 0;
+	dev_priv->wm.spr_latency[3] = 0;
+	dev_priv->wm.cur_latency[3] = 0;
+
+	DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
 	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3025,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 
-	if (IS_GEN6(dev_priv))
+	if (IS_GEN6(dev_priv)) {
 		snb_wm_latency_quirk(dev_priv);
+		snb_wm_lp3_irq_quirk(dev_priv);
+	}
 }
 
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
@@ -3160,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
 	 * and after the vblank.
 	 */
 	*a = newstate->wm.ilk.optimal;
-	if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
+	if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+	    intel_state->skip_intermediate_wm)
 		return 0;
 
 	a->pipe_enabled |= b->pipe_enabled;
@@ -3612,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
 static bool
 intel_has_sagv(struct drm_i915_private *dev_priv)
 {
-	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
-	    IS_CANNONLAKE(dev_priv))
-		return true;
-
-	if (IS_SKYLAKE(dev_priv) &&
-	    dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
-		return true;
-
-	return false;
+	return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
+		dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
 }
 
 /*
@@ -3784,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 
 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 			      const struct intel_crtc_state *cstate,
-			      const unsigned int total_data_rate,
+			      const u64 total_data_rate,
 			      const int num_active,
 			      struct skl_ddb_allocation *ddb)
 {
@@ -3798,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 		return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
 	adjusted_mode = &cstate->base.adjusted_mode;
-	total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+	total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
 
 	/*
 	 * 12GB/s is maximum BW supported by single DBuf slice.
 	 */
-	if (total_data_bw >= GBps(12) || num_active > 1) {
+	if (num_active > 1 || total_data_bw >= GBps(12)) {
 		ddb->enabled_slices = 2;
 	} else {
 		ddb->enabled_slices = 1;
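Reordering the condition lets the common multi-pipe case short-circuit before the bandwidth math, and with total_data_rate already u64 the explicit cast at the multiply becomes redundant. For a feel of the magnitudes, a hedged back-of-the-envelope check; GBps() is assumed here to expand to decimal gigabytes per second, mirroring the kernel macro:

	#include <stdint.h>
	#include <stdio.h>

	#define GBps(x) ((uint64_t)(x) * 1000000000ULL)	/* assumed definition */

	int main(void)
	{
		/* One 3840x2160 XRGB (4 bytes/px) plane at 60 Hz. */
		uint64_t rate = 3840ULL * 2160 * 4;	/* bytes per frame  */
		uint64_t bw = rate * 60;		/* bytes per second */

		printf("total_data_bw = %llu (%.2f GB/s)\n",
		       (unsigned long long)bw, bw / 1e9);
		/* ~2 GB/s, comfortably below 12 GB/s -> one DBuf slice */
		printf("needs two DBuf slices: %s\n",
		       bw >= GBps(12) ? "yes" : "no");
		return 0;
	}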
@@ -3814,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 }
 
 static void
-skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 				   const struct intel_crtc_state *cstate,
-				   const unsigned int total_data_rate,
+				   const u64 total_data_rate,
 				   struct skl_ddb_allocation *ddb,
 				   struct skl_ddb_entry *alloc, /* out */
 				   int *num_active /* out */)
 {
 	struct drm_atomic_state *state = cstate->base.state;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *for_crtc = cstate->base.crtc;
 	const struct drm_crtc_state *crtc_state;
 	const struct drm_crtc *crtc;
@@ -3945,14 +3976,9 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 			      val & PLANE_CTL_ALPHA_MASK);
 
 	val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-	/*
-	 * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
-	 * registers for now.
-	 */
-	if (INTEL_GEN(dev_priv) < 11)
+	if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
 		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
 
-	if (fourcc == DRM_FORMAT_NV12) {
 		skl_ddb_entry_init_from_hw(dev_priv,
 					   &ddb->plane[pipe][plane_id], val2);
 		skl_ddb_entry_init_from_hw(dev_priv,
@@ -4139,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 	return 0;
 }
 
-static unsigned int
+static u64
 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
-			     const struct drm_plane_state *pstate,
+			     const struct intel_plane_state *intel_pstate,
 			     const int plane)
 {
-	struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
-	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+	struct intel_plane *intel_plane =
+		to_intel_plane(intel_pstate->base.plane);
 	uint32_t data_rate;
 	uint32_t width = 0, height = 0;
 	struct drm_framebuffer *fb;
 	u32 format;
 	uint_fixed_16_16_t down_scale_amount;
+	u64 rate;
 
 	if (!intel_pstate->base.visible)
 		return 0;
 
-	fb = pstate->fb;
+	fb = intel_pstate->base.fb;
 	format = fb->format->format;
 
 	if (intel_plane->id == PLANE_CURSOR)
@@ -4177,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 		height /= 2;
 	}
 
-	data_rate = width * height * fb->format->cpp[plane];
+	data_rate = width * height;
 
 	down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
 
-	return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+	rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+
+	rate *= fb->format->cpp[plane];
+	return rate;
 }
 
-/*
- * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
- * a 8192x4096@32bpp framebuffer:
- *   3 * 4096 * 8192 * 4 < 2^32
- */
-static unsigned int
+static u64
 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
-				 unsigned int *plane_data_rate,
-				 unsigned int *uv_plane_data_rate)
+				 u64 *plane_data_rate,
+				 u64 *uv_plane_data_rate)
 {
 	struct drm_crtc_state *cstate = &intel_cstate->base;
 	struct drm_atomic_state *state = cstate->state;
 	struct drm_plane *plane;
 	const struct drm_plane_state *pstate;
-	unsigned int total_data_rate = 0;
+	u64 total_data_rate = 0;
 
 	if (WARN_ON(!state))
 		return 0;
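The deleted comment's fits-in-32-bits argument assumed three planes; with up to seven planes per pipe on Gen11 and the rate later multiplied by the refresh rate in intel_get_ddb_size(), the totals are kept in u64 end to end. A quick standalone check of where 32 bits actually run out (display sizes picked purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Seven 5120x2880 XRGB planes on one pipe. */
		uint64_t per_plane = 5120ULL * 2880 * 4;	/* ~59 MB per frame        */
		uint64_t total = per_plane * 7;			/* ~413 MB, still < 2^32   */
		uint64_t bw = total * 120;			/* x refresh: ~49.5 GB/s   */

		printf("total rate: %llu\n", (unsigned long long)total);
		printf("bandwidth:  %llu (overflows u32: %s)\n",
		       (unsigned long long)bw,
		       bw > UINT32_MAX ? "yes" : "no");
		return 0;
	}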
@@ -4206,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
 	/* Calculate and cache data rate for each plane */
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
 		enum plane_id plane_id = to_intel_plane(plane)->id;
-		unsigned int rate;
+		u64 rate;
+		const struct intel_plane_state *intel_pstate =
+			to_intel_plane_state(pstate);
 
 		/* packed/y */
 		rate = skl_plane_relative_data_rate(intel_cstate,
-						    pstate, 0);
+						    intel_pstate, 0);
 		plane_data_rate[plane_id] = rate;
-
 		total_data_rate += rate;
 
 		/* uv-plane */
 		rate = skl_plane_relative_data_rate(intel_cstate,
-						    pstate, 1);
+						    intel_pstate, 1);
 		uv_plane_data_rate[plane_id] = rate;
-
 		total_data_rate += rate;
 	}
 
 	return total_data_rate;
 }
 
+static u64
+icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+				 u64 *plane_data_rate)
+{
+	struct drm_crtc_state *cstate = &intel_cstate->base;
+	struct drm_atomic_state *state = cstate->state;
+	struct drm_plane *plane;
+	const struct drm_plane_state *pstate;
+	u64 total_data_rate = 0;
+
+	if (WARN_ON(!state))
+		return 0;
+
+	/* Calculate and cache data rate for each plane */
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
+		const struct intel_plane_state *intel_pstate =
+			to_intel_plane_state(pstate);
+		enum plane_id plane_id = to_intel_plane(plane)->id;
+		u64 rate;
+
+		if (!intel_pstate->linked_plane) {
+			rate = skl_plane_relative_data_rate(intel_cstate,
+							    intel_pstate, 0);
+			plane_data_rate[plane_id] = rate;
+			total_data_rate += rate;
+		} else {
+			enum plane_id y_plane_id;
+
+			/*
+			 * The slave plane might not iterate in
+			 * drm_atomic_crtc_state_for_each_plane_state(),
+			 * and needs the master plane state which may be
+			 * NULL if we try get_new_plane_state(), so we
+			 * always calculate from the master.
+			 */
+			if (intel_pstate->slave)
+				continue;
+
+			/* Y plane rate is calculated on the slave */
+			rate = skl_plane_relative_data_rate(intel_cstate,
+							    intel_pstate, 0);
+			y_plane_id = intel_pstate->linked_plane->id;
+			plane_data_rate[y_plane_id] = rate;
+			total_data_rate += rate;
+
+			rate = skl_plane_relative_data_rate(intel_cstate,
+							    intel_pstate, 1);
+			plane_data_rate[plane_id] = rate;
+			total_data_rate += rate;
+		}
+	}
+
+	return total_data_rate;
+}
+
 static uint16_t
 skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
 {
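On Gen11 an NV12 framebuffer is scanned out by a pair of hardware planes: the visible master carries the UV data while its linked slave carries Y, and the slave may not turn up in the plane-state walk with usable state of its own, which is why both array slots get filled from the master. A toy model of that bookkeeping (the linked_plane/slave field names mirror the patch; everything else is made up):

	#include <stdio.h>

	enum plane_id { PLANE_1, PLANE_2, NUM_PLANES };

	struct plane_state {
		enum plane_id id;
		const struct plane_state *linked_plane;	/* other half of the Y/UV pair */
		int slave;				/* 1 = Y (slave) plane         */
	};

	int main(void)
	{
		struct plane_state y  = { PLANE_2, NULL, 1 };	/* slave: Y plane   */
		struct plane_state uv = { PLANE_1, &y,   0 };	/* master: UV plane */
		unsigned long long rate[NUM_PLANES] = { 0 };

		y.linked_plane = &uv;

		/* Visiting the master fills both slots; a visit to the slave is skipped. */
		if (!uv.slave) {
			rate[uv.linked_plane->id] = 100;	/* Y rate, from master state */
			rate[uv.id] = 50;			/* UV rate                   */
		}

		printf("Y slot=%llu UV slot=%llu\n", rate[PLANE_2], rate[PLANE_1]);
		return 0;
	}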
@@ -4298,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
 
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
 		enum plane_id plane_id = to_intel_plane(plane)->id;
+		struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
 
 		if (plane_id == PLANE_CURSOR)
 			continue;
 
-		if (!pstate->visible)
+		/* slave plane must be invisible and calculated from master */
+		if (!pstate->visible || WARN_ON(plane_state->slave))
 			continue;
 
-		minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
-		uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+		if (!plane_state->linked_plane) {
+			minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
+			uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+		} else {
+			enum plane_id y_plane_id =
+				plane_state->linked_plane->id;
+
+			minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
+			minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+		}
 	}
 
 	minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4318,18 +4408,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 {
 	struct drm_atomic_state *state = cstate->base.state;
 	struct drm_crtc *crtc = cstate->base.crtc;
-	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
 	uint16_t alloc_size, start;
 	uint16_t minimum[I915_MAX_PLANES] = {};
 	uint16_t uv_minimum[I915_MAX_PLANES] = {};
-	unsigned int total_data_rate;
+	u64 total_data_rate;
 	enum plane_id plane_id;
 	int num_active;
-	unsigned int plane_data_rate[I915_MAX_PLANES] = {};
-	unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
+	u64 plane_data_rate[I915_MAX_PLANES] = {};
+	u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
 	uint16_t total_min_blocks = 0;
 
 	/* Clear the partitioning for disabled planes. */
@@ -4344,11 +4434,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		return 0;
 	}
 
-	total_data_rate = skl_get_total_relative_data_rate(cstate,
-							   plane_data_rate,
-							   uv_plane_data_rate);
-	skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
-					   alloc, &num_active);
+	if (INTEL_GEN(dev_priv) < 11)
+		total_data_rate =
+			skl_get_total_relative_data_rate(cstate,
+							 plane_data_rate,
+							 uv_plane_data_rate);
+	else
+		total_data_rate =
+			icl_get_total_relative_data_rate(cstate,
+							 plane_data_rate);
+
+	skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+					   ddb, alloc, &num_active);
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0)
 		return 0;
@@ -4388,7 +4485,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 
 	start = alloc->start;
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-		unsigned int data_rate, uv_data_rate;
+		u64 data_rate, uv_data_rate;
 		uint16_t plane_blocks, uv_plane_blocks;
 
 		if (plane_id == PLANE_CURSOR)
@@ -4402,8 +4499,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		 * result is < available as data_rate / total_data_rate < 1
 		 */
 		plane_blocks = minimum[plane_id];
-		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
-					total_data_rate);
+		plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
 
 		/* Leave disabled planes at (0,0) */
 		if (data_rate) {
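div64_u64() is the kernel's 64-by-64 division helper; now that both operands are u64, a plain / would not build on 32-bit targets, where the compiler emits a libgcc call the kernel doesn't provide. The allocation itself is just a proportional split on top of the per-plane floor, e.g.:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Proportional share of the pipe's DDB, on top of the minimum. */
		uint16_t alloc_size = 512;	/* blocks available to the pipe */
		uint16_t minimum = 8;		/* per-plane floor              */
		uint64_t data_rate = 100, total_data_rate = 400;

		uint16_t plane_blocks = minimum +
			(uint16_t)(alloc_size * data_rate / total_data_rate);

		printf("plane gets %u of %u blocks\n", plane_blocks, alloc_size);	/* 136 */
		return 0;
	}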
@@ -4417,8 +4513,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		uv_data_rate = uv_plane_data_rate[plane_id];
 
 		uv_plane_blocks = uv_minimum[plane_id];
-		uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
-					   total_data_rate);
+		uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+
+		/* Gen11+ uses a separate plane for UV watermarks */
+		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
 
 		if (uv_data_rate) {
 			ddb->uv_plane[pipe][plane_id].start = start;
@@ -4476,7 +4574,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
 }
 
 static uint_fixed_16_16_t
-intel_get_linetime_us(struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *cstate)
 {
 	uint32_t pixel_rate;
 	uint32_t crtc_htotal;
@@ -4520,7 +4618,7 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
 
 static int
 skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
-			    struct intel_crtc_state *cstate,
+			    const struct intel_crtc_state *cstate,
 			    const struct intel_plane_state *intel_pstate,
 			    struct skl_wm_params *wp, int plane_id)
 {
@@ -4627,7 +4725,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 }
 
 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
-				struct intel_crtc_state *cstate,
+				const struct intel_crtc_state *cstate,
 				const struct intel_plane_state *intel_pstate,
 				uint16_t ddb_allocation,
 				int level,
@@ -4672,15 +4770,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	} else {
 		if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
 		     wp->dbuf_block_size < 1) &&
-		    (wp->plane_bytes_per_line / wp->dbuf_block_size < 1))
+		    (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
 			selected_result = method2;
-		else if (ddb_allocation >=
-			 fixed16_to_u32_round_up(wp->plane_blocks_per_line))
-			selected_result = min_fixed16(method1, method2);
-		else if (latency >= wp->linetime_us)
-			selected_result = min_fixed16(method1, method2);
-		else
+		} else if (ddb_allocation >=
+			   fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
+			if (IS_GEN9(dev_priv) &&
+			    !IS_GEMINILAKE(dev_priv))
+				selected_result = min_fixed16(method1, method2);
+			else
+				selected_result = method2;
+		} else if (latency >= wp->linetime_us) {
+			if (IS_GEN9(dev_priv) &&
+			    !IS_GEMINILAKE(dev_priv))
+				selected_result = min_fixed16(method1, method2);
+			else
+				selected_result = method2;
+		} else {
 			selected_result = method1;
+		}
 	}
 
 	res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
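The rewritten ladder only changes behaviour for Gen10+: Gen9 parts (other than Geminilake) may still take the smaller of the two methods, while newer hardware has to stick with method2 in those branches. A condensed sketch of just that selection rule, with the predicates reduced to flags rather than the full ladder above:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t fixed16;	/* stand-in for uint_fixed_16_16_t */

	static fixed16 min_fixed16(fixed16 a, fixed16 b) { return a < b ? a : b; }

	static fixed16 select_result(int gen9_not_glk, fixed16 method1, fixed16 method2)
	{
		return gen9_not_glk ? min_fixed16(method1, method2) : method2;
	}

	int main(void)
	{
		printf("gen9: %u, gen10+: %u\n",
		       (unsigned)select_result(1, 40, 70),	/* 40 */
		       (unsigned)select_result(0, 40, 70));	/* 70 */
		return 0;
	}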
@@ -4756,17 +4863,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 		}
 	}
 
-	/*
-	 * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
-	 * disable wm level 1-7 on NV12 planes
-	 */
-	if (wp->is_planar && level >= 1 &&
-	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
-	     IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
-		result->plane_en = false;
-		return 0;
-	}
-
 	/* The number of lines are ignored for the level 0 watermark. */
 	result->plane_res_b = res_blocks;
 	result->plane_res_l = res_lines;
@@ -4778,38 +4874,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 static int
 skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 		      struct skl_ddb_allocation *ddb,
-		      struct intel_crtc_state *cstate,
+		      const struct intel_crtc_state *cstate,
 		      const struct intel_plane_state *intel_pstate,
+		      uint16_t ddb_blocks,
 		      const struct skl_wm_params *wm_params,
 		      struct skl_plane_wm *wm,
-		      int plane_id)
+		      struct skl_wm_level *levels)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
-	struct drm_plane *plane = intel_pstate->base.plane;
-	struct intel_plane *intel_plane = to_intel_plane(plane);
-	uint16_t ddb_blocks;
-	enum pipe pipe = intel_crtc->pipe;
 	int level, max_level = ilk_wm_max_level(dev_priv);
-	enum plane_id intel_plane_id = intel_plane->id;
+	struct skl_wm_level *result_prev = &levels[0];
 	int ret;
 
 	if (WARN_ON(!intel_pstate->base.fb))
 		return -EINVAL;
 
-	ddb_blocks = plane_id ?
-		     skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
-		     skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
-
 	for (level = 0; level <= max_level; level++) {
-		struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
-							 &wm->wm[level];
-		struct skl_wm_level *result_prev;
-
-		if (level)
-			result_prev = plane_id ? &wm->uv_wm[level - 1] :
-						 &wm->wm[level - 1];
-		else
-			result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
+		struct skl_wm_level *result = &levels[level];
 
 		ret = skl_compute_plane_wm(dev_priv,
 					   cstate,
@@ -4821,6 +4901,8 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 					   result);
 		if (ret)
 			return ret;
+
+		result_prev = result;
 	}
 
 	if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
@@ -4830,7 +4912,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 }
 
 static uint32_t
-skl_compute_linetime_wm(struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
 {
 	struct drm_atomic_state *state = cstate->base.state;
 	struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -4852,7 +4934,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
 	return linetime_wm;
 }
 
-static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
+static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
 				      struct skl_wm_params *wp,
 				      struct skl_wm_level *wm_l0,
 				      uint16_t ddb_allocation,
@@ -4862,7 +4944,7 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
 	const struct drm_i915_private *dev_priv = to_i915(dev);
 	uint16_t trans_min, trans_y_tile_min;
 	const uint16_t trans_amount = 10; /* This is configurable amount */
-	uint16_t trans_offset_b, res_blocks;
+	uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
 
 	if (!cstate->base.active)
 		goto exit;
@@ -4875,19 +4957,31 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
 	if (!dev_priv->ipc_enabled)
 		goto exit;
 
-	trans_min = 0;
-	if (INTEL_GEN(dev_priv) >= 10)
+	trans_min = 14;
+	if (INTEL_GEN(dev_priv) >= 11)
 		trans_min = 4;
 
 	trans_offset_b = trans_min + trans_amount;
 
+	/*
+	 * The spec asks for Selected Result Blocks for wm0 (the real value),
+	 * not Result Blocks (the integer value). Pay attention to the capital
+	 * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
+	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
+	 * and since we later will have to get the ceiling of the sum in the
+	 * transition watermarks calculation, we can just pretend Selected
+	 * Result Blocks is Result Blocks minus 1 and it should work for the
+	 * current platforms.
+	 */
+	wm0_sel_res_b = wm_l0->plane_res_b - 1;
+
 	if (wp->y_tiled) {
 		trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
 							wp->y_tile_minimum);
-		res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
+		res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
 			     trans_offset_b;
 	} else {
-		res_blocks = wm_l0->plane_res_b + trans_offset_b;
+		res_blocks = wm0_sel_res_b + trans_offset_b;
 
 		/* WA BUG:1938466 add one block for non y-tile planes */
 		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
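Worked through with the new pre-Gen11 numbers: if the L0 Result Blocks value is 31, the code treats Selected Result Blocks as 30, and a non-y-tiled plane lands at 30 + 14 + 10 = 54 blocks before the later ceiling/register step. The same arithmetic as a standalone check:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Pre-gen11 values from the hunk above. */
		uint16_t trans_min = 14, trans_amount = 10;
		uint16_t wm0_res_b = 31;		/* Result Blocks                   */
		uint16_t wm0_sel_res_b = wm0_res_b - 1;	/* treated as Selected Result Blocks */
		uint16_t res_blocks = wm0_sel_res_b + trans_min + trans_amount;

		printf("transition watermark: %u blocks\n", res_blocks);	/* 54 */
		return 0;
	}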
@@ -4907,16 +5001,101 @@ exit:
 	trans_wm->plane_en = false;
 }
 
+static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
+				       struct skl_pipe_wm *pipe_wm,
+				       enum plane_id plane_id,
+				       const struct intel_crtc_state *cstate,
+				       const struct intel_plane_state *pstate,
+				       int color_plane)
+{
+	struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
+	struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+	enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe;
+	struct skl_wm_params wm_params;
+	uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+	int ret;
+
+	ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
+					  &wm_params, color_plane);
+	if (ret)
+		return ret;
+
+	ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
+				    ddb_blocks, &wm_params, wm, wm->wm);
+
+	if (ret)
+		return ret;
+
+	skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
+				  ddb_blocks, &wm->trans_wm);
+
+	return 0;
+}
+
+static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
+				     struct skl_pipe_wm *pipe_wm,
+				     const struct intel_crtc_state *cstate,
+				     const struct intel_plane_state *pstate)
+{
+	enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
+
+	return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
+}
+
+static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
+				     struct skl_pipe_wm *pipe_wm,
+				     const struct intel_crtc_state *cstate,
+				     const struct intel_plane_state *pstate)
+{
+	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum plane_id plane_id = plane->id;
+	struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+	struct skl_wm_params wm_params;
+	enum pipe pipe = plane->pipe;
+	uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+	int ret;
+
+	ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
+	if (ret)
+		return ret;
+
+	/* uv plane watermarks must also be validated for NV12/Planar */
+	ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
+
+	ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1);
+	if (ret)
+		return ret;
+
+	return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
+				     ddb_blocks, &wm_params, wm, wm->uv_wm);
+}
+
+static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
+				     struct skl_pipe_wm *pipe_wm,
+				     const struct intel_crtc_state *cstate,
+				     const struct intel_plane_state *pstate)
+{
+	int ret;
+	enum plane_id y_plane_id = pstate->linked_plane->id;
+	enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id;
+
+	ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id,
+					  cstate, pstate, 0);
+	if (ret)
+		return ret;
+
+	return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id,
+					   cstate, pstate, 1);
+}
+
 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 			     struct skl_ddb_allocation *ddb,
 			     struct skl_pipe_wm *pipe_wm)
 {
-	struct drm_device *dev = cstate->base.crtc->dev;
 	struct drm_crtc_state *crtc_state = &cstate->base;
-	const struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_plane *plane;
 	const struct drm_plane_state *pstate;
-	struct skl_plane_wm *wm;
 	int ret;
 
 	/*
@@ -4928,44 +5107,21 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
 		const struct intel_plane_state *intel_pstate =
 			to_intel_plane_state(pstate);
-		enum plane_id plane_id = to_intel_plane(plane)->id;
-		struct skl_wm_params wm_params;
-		enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
-		uint16_t ddb_blocks;
 
-		wm = &pipe_wm->planes[plane_id];
-		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+		/* Watermarks calculated in master */
+		if (intel_pstate->slave)
+			continue;
 
-		ret = skl_compute_plane_wm_params(dev_priv, cstate,
-						  intel_pstate, &wm_params, 0);
-		if (ret)
-			return ret;
+		if (intel_pstate->linked_plane)
+			ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+		else if (intel_pstate->base.fb &&
+			 intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
+			ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+		else
+			ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate);
 
-		ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
-					    intel_pstate, &wm_params, wm, 0);
 		if (ret)
 			return ret;
-
-		skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
-					  ddb_blocks, &wm->trans_wm);
-
-		/* uv plane watermarks must also be validated for NV12/Planar */
-		if (wm_params.is_planar) {
-			memset(&wm_params, 0, sizeof(struct skl_wm_params));
-			wm->is_planar = true;
-
-			ret = skl_compute_plane_wm_params(dev_priv, cstate,
-							  intel_pstate,
-							  &wm_params, 1);
-			if (ret)
-				return ret;
-
-			ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
-						    intel_pstate, &wm_params,
-						    wm, 1);
-			if (ret)
-				return ret;
-		}
 	}
 
 	pipe_wm->linetime = skl_compute_linetime_wm(cstate);
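The per-plane loop now just dispatches between three builders. The rule, pulled out into a toy function with flags standing in for the real state checks:

	#include <stdio.h>

	enum wm_path { WM_SINGLE, WM_SKL_PLANAR, WM_ICL_PLANAR };

	/* Condensed dispatch rule from the loop above; inputs are faked flags. */
	static enum wm_path pick_wm_path(int has_linked_plane, int fb_is_nv12)
	{
		if (has_linked_plane)
			return WM_ICL_PLANAR;	/* Y/UV split across two HW planes   */
		if (fb_is_nv12)
			return WM_SKL_PLANAR;	/* one HW plane, uv_wm validated too */
		return WM_SINGLE;
	}

	int main(void)
	{
		printf("%d %d %d\n", pick_wm_path(1, 1), pick_wm_path(0, 1),
		       pick_wm_path(0, 0));
		return 0;
	}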
@@ -5016,14 +5172,7 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
 			   &wm->trans_wm);
 
-	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-			    &ddb->plane[pipe][plane_id]);
-	/* FIXME: add proper NV12 support for ICL. */
-	if (INTEL_GEN(dev_priv) >= 11)
-		return skl_ddb_entry_write(dev_priv,
-					   PLANE_BUF_CFG(pipe, plane_id),
-					   &ddb->plane[pipe][plane_id]);
-	if (wm->is_planar) {
+	if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
 		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
 				    &ddb->uv_plane[pipe][plane_id]);
 		skl_ddb_entry_write(dev_priv,
@@ -5032,7 +5181,8 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 	} else {
 		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
 				    &ddb->plane[pipe][plane_id]);
-		I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+		if (INTEL_GEN(dev_priv) < 11)
+			I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
 	}
 }
 
@@ -5076,16 +5226,15 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
 	return a->start < b->end && b->start < a->end;
 }
 
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
-				 const struct skl_ddb_entry **entries,
-				 const struct skl_ddb_entry *ddb,
-				 int ignore)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+				 const struct skl_ddb_entry entries[],
+				 int num_entries, int ignore_idx)
 {
-	enum pipe pipe;
+	int i;
 
-	for_each_pipe(dev_priv, pipe) {
-		if (pipe != ignore && entries[pipe] &&
-		    skl_ddb_entries_overlap(ddb, entries[pipe]))
+	for (i = 0; i < num_entries; i++) {
+		if (i != ignore_idx &&
+		    skl_ddb_entries_overlap(ddb, &entries[i]))
 			return true;
 	}
 
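Decoupling the helper from dev_priv turns it into a plain array scan, so callers can hand it any entry array plus an index to skip. A self-contained usage sketch, with the entry type reduced to a start/end pair:

	#include <stdbool.h>
	#include <stdio.h>

	struct ddb_entry { int start, end; };

	static bool entries_overlap(const struct ddb_entry *a, const struct ddb_entry *b)
	{
		return a->start < b->end && b->start < a->end;
	}

	static bool allocation_overlaps(const struct ddb_entry *ddb,
					const struct ddb_entry entries[],
					int num_entries, int ignore_idx)
	{
		for (int i = 0; i < num_entries; i++)
			if (i != ignore_idx && entries_overlap(ddb, &entries[i]))
				return true;
		return false;
	}

	int main(void)
	{
		struct ddb_entry pipes[3] = { { 0, 100 }, { 100, 200 }, { 200, 300 } };
		struct ddb_entry mine = { 90, 150 };

		/* Check pipe 1's proposed range against the other pipes. */
		printf("overlaps: %d\n", allocation_overlaps(&mine, pipes, 3, 1));	/* 1 */
		return 0;
	}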
@@ -5137,11 +5286,12 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
 	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
-	struct drm_plane_state *plane_state;
 	struct drm_plane *plane;
 	enum pipe pipe = intel_crtc->pipe;
 
 	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
+		struct drm_plane_state *plane_state;
+		struct intel_plane *linked;
 		enum plane_id plane_id = to_intel_plane(plane)->id;
 
 		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
@@ -5153,6 +5303,15 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
 		plane_state = drm_atomic_get_plane_state(state, plane);
 		if (IS_ERR(plane_state))
 			return PTR_ERR(plane_state);
+
+		/* Make sure linked plane is updated too */
+		linked = to_intel_plane_state(plane_state)->linked_plane;
+		if (!linked)
+			continue;
+
+		plane_state = drm_atomic_get_plane_state(state, &linked->base);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
 	}
 
 	return 0;
@@ -5211,11 +5370,11 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
 			if (skl_ddb_entry_equal(old, new))
 				continue;
 
-			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
-					 intel_plane->base.base.id,
-					 intel_plane->base.name,
-					 old->start, old->end,
-					 new->start, new->end);
+			DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+				      intel_plane->base.base.id,
+				      intel_plane->base.name,
+				      old->start, old->end,
+				      new->start, new->end);
 		}
 	}
 }
@@ -6117,14 +6276,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	/* Display WA #0477 WaDisableIPC: skl */
-	if (IS_SKYLAKE(dev_priv))
-		dev_priv->ipc_enabled = false;
-
-	/* Display WA #1141: SKL:all KBL:all CFL */
-	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
-	    !dev_priv->dram_info.symmetric_memory)
-		dev_priv->ipc_enabled = false;
+	if (!HAS_IPC(dev_priv))
+		return;
 
 	val = I915_READ(DISP_ARB_CTL2);
 
@@ -6138,11 +6291,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 
 void intel_init_ipc(struct drm_i915_private *dev_priv)
 {
-	dev_priv->ipc_enabled = false;
 	if (!HAS_IPC(dev_priv))
 		return;
 
-	dev_priv->ipc_enabled = true;
+	/* Display WA #1141: SKL:all KBL:all CFL */
+	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+		dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
+	else
+		dev_priv->ipc_enabled = true;
+
 	intel_enable_ipc(dev_priv);
 }
 
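After the move, intel_enable_ipc() only writes the register while intel_init_ipc() owns the policy. The resulting decision, boiled down to a pure function with flags standing in for the platform checks:

	#include <stdio.h>

	/* Display WA #1141 keeps IPC off on KBL/CFL without symmetric DIMMs. */
	static int ipc_enabled(int has_ipc, int is_kbl_or_cfl, int symmetric_memory)
	{
		if (!has_ipc)
			return 0;
		return is_kbl_or_cfl ? symmetric_memory : 1;
	}

	int main(void)
	{
		printf("kbl asymmetric: %d, kbl symmetric: %d, other: %d\n",
		       ipc_enabled(1, 1, 0), ipc_enabled(1, 1, 1),
		       ipc_enabled(1, 0, 0));
		return 0;
	}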
@@ -8736,6 +8893,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
 	/* This is not a Wa. Enable to reduce Sampler power */
 	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
 		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+
+	/* WaEnable32PlaneMode:icl */
+	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+		   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
 }
 
 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9313,8 +9474,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_i915_private *dev_priv)
 {
-	intel_fbc_init(dev_priv);
-
 	/* For cxsr */
 	if (IS_PINEVIEW(dev_priv))
 		i915_pineview_get_mem_freq(dev_priv);