-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c               | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.c          |  7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.h          |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c       | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h                  |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c                 |  4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c             | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c                 |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c                  |  4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c |  2
10 files changed, 72 insertions, 83 deletions
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 2deb05f618fb..7cb0818a13de 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
 {
 	struct intel_gvt_irq *irq = &gvt->irq;
 	struct intel_vgpu *vgpu;
-	bool have_enabled_pipe = false;
 	int pipe, id;
 
 	if (WARN_ON(!mutex_is_locked(&gvt->lock)))
 		return;
 
-	hrtimer_cancel(&irq->vblank_timer.timer);
-
 	for_each_active_vgpu(gvt, vgpu, id) {
 		for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
-			have_enabled_pipe =
-				pipe_is_enabled(vgpu, pipe);
-			if (have_enabled_pipe)
-				break;
+			if (pipe_is_enabled(vgpu, pipe))
+				goto out;
 		}
 	}
 
-	if (have_enabled_pipe)
-		hrtimer_start(&irq->vblank_timer.timer,
-			ktime_add_ns(ktime_get(), irq->vblank_timer.period),
-			HRTIMER_MODE_ABS);
+	/* all the pipes are disabled */
+	hrtimer_cancel(&irq->vblank_timer.timer);
+	return;
+
+out:
+	hrtimer_start(&irq->vblank_timer.timer,
+		ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+		HRTIMER_MODE_ABS);
+
 }
 
 static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
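The rewrite above turns the have_enabled_pipe flag into a direct goto: the scan exits at the first enabled pipe and restarts the timer, and only when every pipe is down does it cancel. A minimal stand-alone sketch of that control-flow shape, with stand-in names (pipe_enabled, timer_cancel, timer_start) rather than the real GVT structures:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 4

static bool pipe_enabled[MAX_PIPES];

static void timer_cancel(void) { puts("vblank timer cancelled"); }
static void timer_start(void)  { puts("vblank timer (re)started"); }

static void check_vblank_emulation(void)
{
	int pipe;

	for (pipe = 0; pipe < MAX_PIPES; pipe++)
		if (pipe_enabled[pipe])
			goto out;

	/* all the pipes are disabled */
	timer_cancel();
	return;

out:
	timer_start();
}

int main(void)
{
	check_vblank_emulation();	/* nothing enabled: cancel */
	pipe_enabled[2] = true;
	check_vblank_emulation();	/* one pipe up: restart */
	return 0;
}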
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index 152f16c11878..348b29a845c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence,
 	return NOTIFY_DONE;
 }
 
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 			     unsigned int flags)
 {
 	struct clflush *clflush;
@@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	 */
 	if (!i915_gem_object_has_struct_page(obj)) {
 		obj->cache_dirty = false;
-		return;
+		return false;
 	}
 
 	/* If the GPU is snooping the contents of the CPU cache,
@@ -140,7 +140,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	 * tracking.
 	 */
 	if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
-		return;
+		return false;
 
 	trace_i915_gem_object_clflush(obj);
 
@@ -179,4 +179,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	}
 
 	obj->cache_dirty = false;
+	return true;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h
index 2455a7820937..f390247561b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.h
@@ -28,7 +28,7 @@
 struct drm_i915_private;
 struct drm_i915_gem_object;
 
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 			     unsigned int flags);
 #define I915_CLFLUSH_FORCE BIT(0)
 #define I915_CLFLUSH_SYNC BIT(1)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 054b2e54cdaf..e9503f6d1100 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -560,9 +560,6 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 		eb->args->flags |= __EXEC_HAS_RELOC;
 	}
 
-	entry->flags |= __EXEC_OBJECT_HAS_PIN;
-	GEM_BUG_ON(eb_vma_misplaced(entry, vma));
-
 	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
 		err = i915_vma_get_fence(vma);
 		if (unlikely(err)) {
@@ -574,6 +571,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 		entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 	}
 
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+	GEM_BUG_ON(eb_vma_misplaced(entry, vma));
+
 	return 0;
 }
 
@@ -1458,7 +1458,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
 	 * to read. However, if the array is not writable the user loses
 	 * the updated relocation values.
 	 */
-	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs))))
+	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
 		return -EFAULT;
 
 	do {
@@ -1775,7 +1775,7 @@ out:
 		}
 	}
 
-	return err ?: have_copy;
+	return err;
 }
 
 static int eb_relocate(struct i915_execbuffer *eb)
@@ -1825,7 +1825,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	int err;
 
 	for (i = 0; i < count; i++) {
-		const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
 		struct i915_vma *vma = exec_to_vma(entry);
 		struct drm_i915_gem_object *obj = vma->obj;
 
@@ -1841,12 +1841,14 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 			eb->request->capture_list = capture;
 		}
 
+		if (unlikely(obj->cache_dirty && !obj->cache_coherent)) {
+			if (i915_gem_clflush_object(obj, 0))
+				entry->flags &= ~EXEC_OBJECT_ASYNC;
+		}
+
 		if (entry->flags & EXEC_OBJECT_ASYNC)
 			goto skip_flushes;
 
-		if (unlikely(obj->cache_dirty && !obj->cache_coherent))
-			i915_gem_clflush_object(obj, 0);
-
 		err = i915_gem_request_await_object
 			(eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
 		if (err)
@@ -2209,7 +2211,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_unlock;
 
 	err = eb_relocate(&eb);
-	if (err)
+	if (err) {
 		/*
 		 * If the user expects the execobject.offset and
 		 * reloc.presumed_offset to be an exact match,
@@ -2218,8 +2220,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		 * relocation.
 		 */
 		args->flags &= ~__EXEC_HAS_RELOC;
-	if (err < 0)
 		goto err_vma;
+	}
 
 	if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
 		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
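The eb_move_to_gpu() hunk above is the consumer of the new bool return from i915_gem_clflush_object(): if a flush was actually emitted for a dirty, non-coherent object, EXEC_OBJECT_ASYNC is cleared so the request still waits for that flush. A stand-alone sketch of the contract, with illustrative names in place of the i915 ones:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_ASYNC (1u << 0)	/* stand-in for EXEC_OBJECT_ASYNC */

struct object {
	bool cache_dirty;
	bool cache_coherent;
};

/* returns true only when a flush was actually queued */
static bool flush_object(struct object *obj)
{
	if (obj->cache_coherent)
		return false;		/* nothing to do, no fence involved */
	obj->cache_dirty = false;
	return true;
}

int main(void)
{
	struct object obj = { .cache_dirty = true, .cache_coherent = false };
	unsigned int flags = FLAG_ASYNC;

	if (obj.cache_dirty && !obj.cache_coherent) {
		if (flush_object(&obj))
			flags &= ~FLAG_ASYNC;	/* must wait for the flush */
	}

	printf("async still allowed: %s\n",
	       (flags & FLAG_ASYNC) ? "yes" : "no");
	return 0;
}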
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4a673fc1a432..20cf272c97b1 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -284,12 +284,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma)
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
 {
-	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 	vma->flags--;
 }
 
 static inline void i915_vma_unpin(struct i915_vma *vma)
 {
+	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	__i915_vma_unpin(vma);
 }
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 80e96f1f49d2..9edeaaef77ad 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1896,8 +1896,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
 		val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
 		val &= ~LOADGEN_SELECT;
 
-		if (((rate < 600000) && (width == 4) && (ln >= 1)) ||
-		    ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) {
+		if ((rate <= 600000 && width == 4 && ln >= 1) ||
+		    (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
 			val |= LOADGEN_SELECT;
 		}
 		I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
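Dropping the redundant parentheses and changing < to <= also makes the shared rate check easier to see; an equivalent stand-alone predicate (the helper name is illustrative) would be:

#include <stdbool.h>
#include <stdio.h>

static bool needs_loadgen_select(int rate, int width, int ln)
{
	if (rate > 600000)
		return false;
	if (width == 4)
		return ln >= 1;
	if (width < 4)
		return ln == 1 || ln == 2;
	return false;	/* width > 4: never */
}

int main(void)
{
	printf("%d\n", needs_loadgen_select(540000, 4, 0));	/* 0 */
	printf("%d\n", needs_loadgen_select(540000, 4, 2));	/* 1 */
	printf("%d\n", needs_loadgen_select(540000, 2, 1));	/* 1 */
	printf("%d\n", needs_loadgen_select(810000, 4, 2));	/* 0 */
	return 0;
}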
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dec9e58545a1..9471c88d449e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3427,26 +3427,6 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
 		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
 }
 
-static void intel_update_primary_planes(struct drm_device *dev)
-{
-	struct drm_crtc *crtc;
-
-	for_each_crtc(dev, crtc) {
-		struct intel_plane *plane = to_intel_plane(crtc->primary);
-		struct intel_plane_state *plane_state =
-			to_intel_plane_state(plane->base.state);
-
-		if (plane_state->base.visible) {
-			trace_intel_update_plane(&plane->base,
-						 to_intel_crtc(crtc));
-
-			plane->update_plane(plane,
-					    to_intel_crtc_state(crtc->state),
-					    plane_state);
-		}
-	}
-}
-
 static int
 __intel_display_resume(struct drm_device *dev,
 		       struct drm_atomic_state *state,
@@ -3499,6 +3479,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 	struct drm_atomic_state *state;
 	int ret;
 
+
+	/* reset doesn't touch the display */
+	if (!i915.force_reset_modeset_test &&
+	    !gpu_reset_clobbers_display(dev_priv))
+		return;
+
 	/*
 	 * Need mode_config.mutex so that we don't
 	 * trample ongoing ->detect() and whatnot.
@@ -3512,12 +3498,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 
 		drm_modeset_backoff(ctx);
 	}
-
-	/* reset doesn't touch the display, but flips might get nuked anyway, */
-	if (!i915.force_reset_modeset_test &&
-	    !gpu_reset_clobbers_display(dev_priv))
-		return;
-
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
@@ -3547,6 +3527,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
 	int ret;
 
+	/* reset doesn't touch the display */
+	if (!i915.force_reset_modeset_test &&
+	    !gpu_reset_clobbers_display(dev_priv))
+		return;
+
+	if (!state)
+		goto unlock;
+
 	/*
 	 * Flips in the rings will be nuked by the reset,
 	 * so complete all pending flips so that user space
@@ -3558,22 +3546,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 
 	/* reset doesn't touch the display */
 	if (!gpu_reset_clobbers_display(dev_priv)) {
-		if (!state) {
-			/*
-			 * Flips in the rings have been nuked by the reset,
-			 * so update the base address of all primary
-			 * planes to the the last fb to make sure we're
-			 * showing the correct fb after a reset.
-			 *
-			 * FIXME: Atomic will make this obsolete since we won't schedule
-			 * CS-based flips (which might get lost in gpu resets) any more.
-			 */
-			intel_update_primary_planes(dev);
-		} else {
-			ret = __intel_display_resume(dev, state, ctx);
+		/* for testing only restore the display */
+		ret = __intel_display_resume(dev, state, ctx);
 		if (ret)
 			DRM_ERROR("Restoring old state failed with %i\n", ret);
-		}
 	} else {
 		/*
 		 * The display has been reset as well,
@@ -3597,8 +3573,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 		intel_hpd_init(dev_priv);
 	}
 
-	if (state)
-		drm_atomic_state_put(state);
+	drm_atomic_state_put(state);
+unlock:
 	drm_modeset_drop_locks(ctx);
 	drm_modeset_acquire_fini(ctx);
 	mutex_unlock(&dev->mode_config.mutex);
@@ -9117,6 +9093,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	u64 power_domain_mask;
 	bool active;
 
+	if (INTEL_GEN(dev_priv) >= 9) {
+		intel_crtc_init_scalers(crtc, pipe_config);
+
+		pipe_config->scaler_state.scaler_id = -1;
+		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
+	}
+
 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;
@@ -9145,13 +9128,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	pipe_config->gamma_mode =
 		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		intel_crtc_init_scalers(crtc, pipe_config);
-
-		pipe_config->scaler_state.scaler_id = -1;
-		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
-	}
-
 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
 		power_domain_mask |= BIT_ULL(power_domain);
@@ -9540,7 +9516,16 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 	 * On some platforms writing CURCNTR first will also
 	 * cause CURPOS to be armed by the CURBASE write.
 	 * Without the CURCNTR write the CURPOS write would
-	 * arm itself.
+	 * arm itself. Thus we always start the full update
+	 * with a CURCNTR write.
+	 *
+	 * On other platforms CURPOS always requires the
+	 * CURBASE write to arm the update. Additionally
+	 * a write to any of the cursor registers will cancel
+	 * an already armed cursor update. Thus leaving out
+	 * the CURBASE write after CURPOS could lead to a
+	 * cursor that doesn't appear to move, or even change
+	 * shape. Thus we always write CURBASE.
 	 *
 	 * CURCNTR and CUR_FBC_CTL are always
 	 * armed by the CURBASE write only.
@@ -9559,6 +9544,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 		plane->cursor.cntl = cntl;
 	} else {
 		I915_WRITE_FW(CURPOS(pipe), pos);
+		I915_WRITE_FW(CURBASE(pipe), base);
 	}
 
 	POSTING_READ_FW(CURBASE(pipe));
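The cursor comment above describes an arming protocol: on these platforms a CURPOS write only takes effect once CURBASE is written, and any cursor register write can cancel an already armed update, so even the position-only fast path must end with a CURBASE write. A toy model of the write sequence (write_reg() stands in for I915_WRITE_FW; nothing here is the real register interface):

#include <stdio.h>

enum cursor_reg { CURCNTR, CURPOS, CURBASE };

static void write_reg(enum cursor_reg reg, unsigned int val)
{
	static const char * const name[] = { "CURCNTR", "CURPOS", "CURBASE" };

	printf("write %s = 0x%08x\n", name[reg], val);
}

/* fast path: only the position changed, control/base values unchanged */
static void cursor_move(unsigned int pos, unsigned int base)
{
	write_reg(CURPOS, pos);
	write_reg(CURBASE, base);	/* arms the CURPOS write above */
}

int main(void)
{
	cursor_move(0x00400040, 0x1000);
	return 0;
}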
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 52d5b82790d9..c17ed0e62b67 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
 		return true;
 	if (IS_SKYLAKE(dev_priv))
 		return true;
-	if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+	if (IS_KABYLAKE(dev_priv))
 		return true;
 	return false;
 }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 48ea0fca1f72..40b224b44d1b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4463,8 +4463,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
 	    (plane_bytes_per_line / 512 < 1))
 		selected_result = method2;
-	else if ((ddb_allocation && ddb_allocation /
-		  fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
+	else if (ddb_allocation >=
+		 fixed_16_16_to_u32_round_up(plane_blocks_per_line))
 		selected_result = min_fixed_16_16(method1, method2);
 	else if (latency >= linetime_us)
 		selected_result = min_fixed_16_16(method1, method2);
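The old expression only worked through integer-division truncation: ddb_allocation / x is non-zero exactly when ddb_allocation >= x (for x > 0), and the leading ddb_allocation && added nothing. The new form states that comparison directly. A small demonstration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int ddb_allocation = 300, plane_blocks = 400;

	/* old, convoluted: relies on 300 / 400 truncating to 0 */
	printf("old: %d\n", (ddb_allocation && ddb_allocation / plane_blocks) >= 1);
	/* new, direct statement of the same test */
	printf("new: %d\n", ddb_allocation >= plane_blocks);
	return 0;
}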
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 627e2aa09766..8cdec455cf7d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -206,7 +206,7 @@ struct drm_i915_private *mock_gem_device(void)
 	mkwrite_device_info(i915)->ring_mask = BIT(0);
 	i915->engine[RCS] = mock_engine(i915, "mock");
 	if (!i915->engine[RCS])
-		goto err_dependencies;
+		goto err_priorities;
 
 	i915->kernel_context = mock_context(i915, NULL);
 	if (!i915->kernel_context)
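The corrected goto target follows the reverse-order unwind convention used throughout the driver: each failure jumps to the label that tears down everything initialised so far, and jumping one label too far (err_dependencies instead of err_priorities) would leak the most recently created cache. A minimal stand-alone version of the pattern, with malloc standing in for the kmem_cache setup:

#include <stdlib.h>

static int mock_setup(void)
{
	void *dependencies, *priorities, *engine;

	dependencies = malloc(16);
	if (!dependencies)
		goto err;
	priorities = malloc(16);
	if (!priorities)
		goto err_dependencies;
	engine = malloc(16);
	if (!engine)
		goto err_priorities;	/* err_dependencies here would leak priorities */

	/* success: for the demo, tear down in reverse order and exit */
	free(engine);
	free(priorities);
	free(dependencies);
	return 0;

err_priorities:
	free(priorities);
err_dependencies:
	free(dependencies);
err:
	return -1;
}

int main(void)
{
	return mock_setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}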