Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c | 18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a5e122..9121646d7c4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
 	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
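
For reference, the revision-range helpers used above come from i915_drv.h of this kernel era; a close paraphrase (a sketch, not copied from this tree):

	#define REVID_FOREVER		0xff
	#define IS_REVID(p, since, until) \
		(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
	#define IS_SKL_REVID(p, since, until) \
		(IS_SKYLAKE(p) && IS_REVID(p, since, until))

INTEL_REVID() reads the 8-bit PCI revision, so an upper bound of 0xff can never be exceeded; replacing SKL_REVID_F0 with REVID_FOREVER therefore widens the workaround from "stepping F0 only" to "F0 and every later stepping".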
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 	WA_SET_BIT_MASKED(HIZ_CHICKEN,
 			  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+	/* This is tied to WaForceContextSaveRestoreNonCoherent */
+	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
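
Note that WA_SET_BIT_MASKED() does not write the register immediately; it records the write in the engine's workaround list, to be replayed on context init. In this file it is defined approximately as follows (sketch):

	#define WA_REG(addr, mask, val) do { \
			const int r = wa_add(dev_priv, (addr), (mask), (val)); \
			if (r) \
				return r; \
		} while (0)

	#define WA_SET_BIT_MASKED(addr, mask) \
		WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

_MASKED_BIT_ENABLE(a) expands to roughly (((a) << 16) | (a)): "masked" registers on this hardware only latch bits whose mirror in the upper 16 bits is also set, so a single 32-bit write can update selected bits without a read-modify-write.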
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = ringbuf->obj;
+	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+	unsigned flags = PIN_OFFSET_BIAS | 4096;
 	int ret;
 
 	if (HAS_LLC(dev_priv) && !obj->stolen) {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
 		if (ret)
 			return ret;
 
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 			return -ENOMEM;
 		}
 	} else {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+					    flags | PIN_MAPPABLE);
 		if (ret)
 			return ret;
 
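
Both pin sites now pass the same flags. In the GGTT pin API of this kernel, PIN_OFFSET_BIAS makes the binder treat the page-aligned value OR'd into the flags word (here 4096, recovered via PIN_OFFSET_MASK, which is ~4095) as a minimum acceptable offset. An illustrative sketch of the effect, not the exact drm_mm search code:

	/* PIN_OFFSET_BIAS | 4096: bind anywhere at or above GGTT offset
	 * 4096, i.e. never in the first page, so the ring buffer can
	 * never start at, or wrap through, offset 0. */
	if (flags & PIN_OFFSET_BIAS)
		search_start = max_t(u64, search_start,
				     flags & PIN_OFFSET_MASK);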
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 	if (unlikely(total_bytes > remain_usable)) {
 		/*
 		 * The base request will fit but the reserved space
-		 * falls off the end. So only need to to wait for the
-		 * reserved size after flushing out the remainder.
+		 * falls off the end. So don't need an immediate wrap
+		 * and only need to effectively wait for the reserved
+		 * size space from the start of ringbuffer.
 		 */
 		wait_bytes = remain_actual + ringbuf->reserved_size;
-		need_wrap = true;
 	} else if (total_bytes > ringbuf->space) {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;
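
The dropped need_wrap = true is the behavioural change in this hunk: when only the reserved space falls off the end, the base request itself still fits at the current tail, so there is no reason to pad out the remainder and wrap immediately. Worked through with assumed numbers (illustrative only):

	/* size = effective_size = 16384, tail = 15360
	 *   remain_usable = remain_actual = 16384 - 15360 = 1024
	 * bytes = 512, reserved_size = 768
	 *   total_bytes = 1280 > remain_usable, but bytes (512) still fits
	 * Both versions wait for remain_actual + reserved_size
	 *   = 1024 + 768 = 1792 bytes of space.
	 * Before: need_wrap = true, so the 1024 tail bytes are filled with
	 *         NOOPs and the request is emitted at offset 0.
	 * After:  the 512-byte request is emitted at the old tail, and the
	 *         wrap happens later, only if the reserved space is used. */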