about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h10
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h6
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c91
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c14
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c41
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h1
6 files changed, 65 insertions, 98 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2858e013642f..032459a50457 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -805,6 +805,8 @@ struct i915_fbc {
805 unsigned long uncompressed_size; 805 unsigned long uncompressed_size;
806 unsigned threshold; 806 unsigned threshold;
807 unsigned int fb_id; 807 unsigned int fb_id;
808 unsigned int possible_framebuffer_bits;
809 unsigned int busy_bits;
808 struct intel_crtc *crtc; 810 struct intel_crtc *crtc;
809 int y; 811 int y;
810 812
@@ -817,14 +819,6 @@ struct i915_fbc {
817 * possible. */ 819 * possible. */
818 bool enabled; 820 bool enabled;
819 821
820 /* On gen8 some rings cannont perform fbc clean operation so for now
821 * we are doing this on SW with mmio.
822 * This variable works in the opposite information direction
823 * of ring->fbc_dirty telling software on frontbuffer tracking
824 * to perform the cache clean on sw side.
825 */
826 bool need_sw_cache_clean;
827
828 struct intel_fbc_work { 822 struct intel_fbc_work {
829 struct delayed_work work; 823 struct delayed_work work;
830 struct drm_crtc *crtc; 824 struct drm_crtc *crtc;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 61a40ab61d6b..fbf81499b736 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1115,7 +1115,11 @@ bool intel_fbc_enabled(struct drm_device *dev);
1115void intel_fbc_update(struct drm_device *dev); 1115void intel_fbc_update(struct drm_device *dev);
1116void intel_fbc_init(struct drm_i915_private *dev_priv); 1116void intel_fbc_init(struct drm_i915_private *dev_priv);
1117void intel_fbc_disable(struct drm_device *dev); 1117void intel_fbc_disable(struct drm_device *dev);
1118void bdw_fbc_sw_flush(struct drm_device *dev, u32 value); 1118void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
1119 unsigned int frontbuffer_bits,
1120 enum fb_op_origin origin);
1121void intel_fbc_flush(struct drm_i915_private *dev_priv,
1122 unsigned int frontbuffer_bits);
1119 1123
1120/* intel_hdmi.c */ 1124/* intel_hdmi.c */
1121void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); 1125void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 618f7bdab0ba..9fcf446e95f5 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -174,29 +174,10 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
174 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; 174 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
175} 175}
176 176
177static void snb_fbc_blit_update(struct drm_device *dev) 177static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
178{ 178{
179 struct drm_i915_private *dev_priv = dev->dev_private; 179 I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
180 u32 blt_ecoskpd; 180 POSTING_READ(MSG_FBC_REND_STATE);
181
182 /* Make sure blitter notifies FBC of writes */
183
184 /* Blitter is part of Media powerwell on VLV. No impact of
185 * his param in other platforms for now */
186 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
187
188 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
189 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
190 GEN6_BLITTER_LOCK_SHIFT;
191 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
192 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
193 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
194 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
195 GEN6_BLITTER_LOCK_SHIFT);
196 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
197 POSTING_READ(GEN6_BLITTER_ECOSKPD);
198
199 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
200} 181}
201 182
202static void ilk_fbc_enable(struct drm_crtc *crtc) 183static void ilk_fbc_enable(struct drm_crtc *crtc)
@@ -239,9 +220,10 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
239 I915_WRITE(SNB_DPFC_CTL_SA, 220 I915_WRITE(SNB_DPFC_CTL_SA,
240 SNB_CPU_FENCE_ENABLE | obj->fence_reg); 221 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
241 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); 222 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
242 snb_fbc_blit_update(dev);
243 } 223 }
244 224
225 intel_fbc_nuke(dev_priv);
226
245 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); 227 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
246} 228}
247 229
@@ -320,7 +302,7 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)
320 SNB_CPU_FENCE_ENABLE | obj->fence_reg); 302 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
321 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); 303 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
322 304
323 snb_fbc_blit_update(dev); 305 intel_fbc_nuke(dev_priv);
324 306
325 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); 307 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
326} 308}
@@ -340,19 +322,6 @@ bool intel_fbc_enabled(struct drm_device *dev)
340 return dev_priv->fbc.enabled; 322 return dev_priv->fbc.enabled;
341} 323}
342 324
343void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
344{
345 struct drm_i915_private *dev_priv = dev->dev_private;
346
347 if (!IS_GEN8(dev))
348 return;
349
350 if (!intel_fbc_enabled(dev))
351 return;
352
353 I915_WRITE(MSG_FBC_REND_STATE, value);
354}
355
356static void intel_fbc_work_fn(struct work_struct *__work) 325static void intel_fbc_work_fn(struct work_struct *__work)
357{ 326{
358 struct intel_fbc_work *work = 327 struct intel_fbc_work *work =
@@ -685,6 +654,44 @@ out_disable:
685 i915_gem_stolen_cleanup_compression(dev); 654 i915_gem_stolen_cleanup_compression(dev);
686} 655}
687 656
657void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
658 unsigned int frontbuffer_bits,
659 enum fb_op_origin origin)
660{
661 struct drm_device *dev = dev_priv->dev;
662 unsigned int fbc_bits;
663
664 if (origin == ORIGIN_GTT)
665 return;
666
667 if (dev_priv->fbc.enabled)
668 fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
669 else if (dev_priv->fbc.fbc_work)
670 fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
671 to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
672 else
673 fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
674
675 dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
676
677 if (dev_priv->fbc.busy_bits)
678 intel_fbc_disable(dev);
679}
680
681void intel_fbc_flush(struct drm_i915_private *dev_priv,
682 unsigned int frontbuffer_bits)
683{
684 struct drm_device *dev = dev_priv->dev;
685
686 if (!dev_priv->fbc.busy_bits)
687 return;
688
689 dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
690
691 if (!dev_priv->fbc.busy_bits)
692 intel_fbc_update(dev);
693}
694
688/** 695/**
689 * intel_fbc_init - Initialize FBC 696 * intel_fbc_init - Initialize FBC
690 * @dev_priv: the i915 device 697 * @dev_priv: the i915 device
@@ -693,12 +700,22 @@ out_disable:
693 */ 700 */
694void intel_fbc_init(struct drm_i915_private *dev_priv) 701void intel_fbc_init(struct drm_i915_private *dev_priv)
695{ 702{
703 enum pipe pipe;
704
696 if (!HAS_FBC(dev_priv)) { 705 if (!HAS_FBC(dev_priv)) {
697 dev_priv->fbc.enabled = false; 706 dev_priv->fbc.enabled = false;
698 dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED; 707 dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
699 return; 708 return;
700 } 709 }
701 710
711 for_each_pipe(dev_priv, pipe) {
712 dev_priv->fbc.possible_framebuffer_bits |=
713 INTEL_FRONTBUFFER_PRIMARY(pipe);
714
715 if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
716 break;
717 }
718
702 if (INTEL_INFO(dev_priv)->gen >= 7) { 719 if (INTEL_INFO(dev_priv)->gen >= 7) {
703 dev_priv->display.fbc_enabled = ilk_fbc_enabled; 720 dev_priv->display.fbc_enabled = ilk_fbc_enabled;
704 dev_priv->display.enable_fbc = gen7_fbc_enable; 721 dev_priv->display.enable_fbc = gen7_fbc_enable;
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 5da73f0124ce..0a1bac8ac72b 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -118,8 +118,6 @@ static void intel_mark_fb_busy(struct drm_device *dev,
118 continue; 118 continue;
119 119
120 intel_increase_pllclock(dev, pipe); 120 intel_increase_pllclock(dev, pipe);
121 if (ring && intel_fbc_enabled(dev))
122 ring->fbc_dirty = true;
123 } 121 }
124} 122}
125 123
@@ -160,6 +158,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
160 158
161 intel_psr_invalidate(dev, obj->frontbuffer_bits); 159 intel_psr_invalidate(dev, obj->frontbuffer_bits);
162 intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits); 160 intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
161 intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
163} 162}
164 163
165/** 164/**
@@ -187,16 +186,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
187 186
188 intel_edp_drrs_flush(dev, frontbuffer_bits); 187 intel_edp_drrs_flush(dev, frontbuffer_bits);
189 intel_psr_flush(dev, frontbuffer_bits); 188 intel_psr_flush(dev, frontbuffer_bits);
190 189 intel_fbc_flush(dev_priv, frontbuffer_bits);
191 /*
192 * FIXME: Unconditional fbc flushing here is a rather gross hack and
193 * needs to be reworked into a proper frontbuffer tracking scheme like
194 * psr employs.
195 */
196 if (dev_priv->fbc.need_sw_cache_clean) {
197 dev_priv->fbc.need_sw_cache_clean = false;
198 bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
199 }
200} 190}
201 191
202/** 192/**
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cd79c3843452..e9858d2e92d0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -317,29 +317,6 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
317 return 0; 317 return 0;
318} 318}
319 319
320static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
321{
322 int ret;
323
324 if (!ring->fbc_dirty)
325 return 0;
326
327 ret = intel_ring_begin(ring, 6);
328 if (ret)
329 return ret;
330 /* WaFbcNukeOn3DBlt:ivb/hsw */
331 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
332 intel_ring_emit(ring, MSG_FBC_REND_STATE);
333 intel_ring_emit(ring, value);
334 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
335 intel_ring_emit(ring, MSG_FBC_REND_STATE);
336 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
337 intel_ring_advance(ring);
338
339 ring->fbc_dirty = false;
340 return 0;
341}
342
343static int 320static int
344gen7_render_ring_flush(struct intel_engine_cs *ring, 321gen7_render_ring_flush(struct intel_engine_cs *ring,
345 u32 invalidate_domains, u32 flush_domains) 322 u32 invalidate_domains, u32 flush_domains)
@@ -398,9 +375,6 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
398 intel_ring_emit(ring, 0); 375 intel_ring_emit(ring, 0);
399 intel_ring_advance(ring); 376 intel_ring_advance(ring);
400 377
401 if (!invalidate_domains && flush_domains)
402 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
403
404 return 0; 378 return 0;
405} 379}
406 380
@@ -462,9 +436,6 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
462 if (ret) 436 if (ret)
463 return ret; 437 return ret;
464 438
465 if (!invalidate_domains && flush_domains)
466 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
467
468 return 0; 439 return 0;
469} 440}
470 441
@@ -2477,7 +2448,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
2477 u32 invalidate, u32 flush) 2448 u32 invalidate, u32 flush)
2478{ 2449{
2479 struct drm_device *dev = ring->dev; 2450 struct drm_device *dev = ring->dev;
2480 struct drm_i915_private *dev_priv = dev->dev_private;
2481 uint32_t cmd; 2451 uint32_t cmd;
2482 int ret; 2452 int ret;
2483 2453
@@ -2486,7 +2456,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
2486 return ret; 2456 return ret;
2487 2457
2488 cmd = MI_FLUSH_DW; 2458 cmd = MI_FLUSH_DW;
2489 if (INTEL_INFO(ring->dev)->gen >= 8) 2459 if (INTEL_INFO(dev)->gen >= 8)
2490 cmd += 1; 2460 cmd += 1;
2491 2461
2492 /* We always require a command barrier so that subsequent 2462 /* We always require a command barrier so that subsequent
@@ -2506,7 +2476,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
2506 cmd |= MI_INVALIDATE_TLB; 2476 cmd |= MI_INVALIDATE_TLB;
2507 intel_ring_emit(ring, cmd); 2477 intel_ring_emit(ring, cmd);
2508 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2478 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2509 if (INTEL_INFO(ring->dev)->gen >= 8) { 2479 if (INTEL_INFO(dev)->gen >= 8) {
2510 intel_ring_emit(ring, 0); /* upper addr */ 2480 intel_ring_emit(ring, 0); /* upper addr */
2511 intel_ring_emit(ring, 0); /* value */ 2481 intel_ring_emit(ring, 0); /* value */
2512 } else { 2482 } else {
@@ -2515,13 +2485,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
2515 } 2485 }
2516 intel_ring_advance(ring); 2486 intel_ring_advance(ring);
2517 2487
2518 if (!invalidate && flush) {
2519 if (IS_GEN7(dev))
2520 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
2521 else if (IS_BROADWELL(dev))
2522 dev_priv->fbc.need_sw_cache_clean = true;
2523 }
2524
2525 return 0; 2488 return 0;
2526} 2489}
2527 2490
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8f3b49a23ccf..c761fe05ad6f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -267,7 +267,6 @@ struct intel_engine_cs {
267 */ 267 */
268 struct drm_i915_gem_request *outstanding_lazy_request; 268 struct drm_i915_gem_request *outstanding_lazy_request;
269 bool gpu_caches_dirty; 269 bool gpu_caches_dirty;
270 bool fbc_dirty;
271 270
272 wait_queue_head_t irq_queue; 271 wait_queue_head_t irq_queue;
273 272