author    Tvrtko Ursulin <tvrtko.ursulin@intel.com>    2016-01-15 10:10:27 -0500
committer Tvrtko Ursulin <tvrtko.ursulin@intel.com>    2016-01-18 04:58:36 -0500
commit    ca82580c9ceace0d52fe7376b8a72bb3b36f612b (patch)
tree      06912c4a5ce10a499b114c27b96262a9b6a0ef01
parent    e0313db047c2f2e368c95a8f03653f9723678e82 (diff)
drm/i915: Do not call API requiring struct_mutex where it is not available
LRC code was calling GEM API like i915_gem_obj_ggtt_offset from places
where the struct_mutex cannot be grabbed (irq handlers).

To avoid that, this patch caches some interesting bits and values in
the engine and context structures. Some usages are also removed where
they are not needed, like a few asserts which are either impossible or
have already been checked during engine initialization.

A side benefit is that interrupt handlers and command submission stop
evaluating invariant conditionals, like what Gen we are running on, on
every interrupt and every command submitted.

This patch deals with the logical ring context id and descriptors,
while subsequent patches will deal with the remaining issues.

v2:
 * Cache the VMA instead of the address. (Chris Wilson)
 * Incorporate Dave Gordon's good comments and function name.

v3:
 * Extract the ctx descriptor template to a function and group the
   functions dealing with the ctx descriptor & co together near the
   top of the file. (Dave Gordon)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dave Gordon <david.s.gordon@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1452870629-13830-1-git-send-email-tvrtko.ursulin@linux.intel.com
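For illustration only (not part of the patch): a minimal, standalone C sketch of the descriptor-caching idea described above, assuming the bit layout documented in the new kernel-doc (flags in bits 0-11, 4K-aligned GGTT address in bits 12-31, context ID in bits 32-51). The EX_* constants and helper names are made up for the example and do not correspond to the actual GEN8_CTX_* values.

	#include <stdint.h>
	#include <stdio.h>

	/* Example flag bits standing in for the per-engine ctx_desc_template
	 * (values assumed, not the real GEN8_CTX_* definitions). */
	#define EX_CTX_VALID		(1ULL << 0)
	#define EX_CTX_PRIVILEGE	(1ULL << 8)
	#define EX_CTX_ID_SHIFT		32
	#define EX_PAGE_SHIFT		12

	/* Assemble a descriptor once, at pin time: template flags (bits 0-11),
	 * the context's 4K-aligned GGTT address (bits 12-31), and the same
	 * address shifted down as a 20-bit globally unique ID (bits 32-51). */
	static uint64_t example_lrc_desc(uint64_t ctx_desc_template, uint64_t lrca)
	{
		uint64_t desc;

		desc = ctx_desc_template;				/* bits 0-11  */
		desc |= lrca;						/* bits 12-31 */
		desc |= (lrca >> EX_PAGE_SHIFT) << EX_CTX_ID_SHIFT;	/* bits 32-51 */

		return desc;
	}

	/* The context ID is then just the upper part of the cached descriptor. */
	static uint32_t example_ctx_id(uint64_t desc)
	{
		return (uint32_t)(desc >> EX_CTX_ID_SHIFT);
	}

	int main(void)
	{
		uint64_t tmpl = EX_CTX_VALID | EX_CTX_PRIVILEGE;
		uint64_t lrca = 0x123000;	/* some 4K-aligned GGTT offset */
		uint64_t desc = example_lrc_desc(tmpl, lrca);

		printf("desc   = 0x%016llx\n", (unsigned long long)desc);
		printf("ctx id = 0x%05x\n", example_ctx_id(desc));
		return 0;
	}

Computing the descriptor once when the context is pinned means the interrupt handler and ELSP submission paths only read a cached u64, instead of taking struct_mutex to query the object's GGTT offset on every interrupt and submission.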
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c     |  15
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h         |   2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h     |   1
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c        | 151
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.h        |   4
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h |   2
6 files changed, 103 insertions, 72 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e3377abc0d4d..0b3550f05026 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1994,12 +1994,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
 }
 
 static void i915_dump_lrc_obj(struct seq_file *m,
-			       struct intel_engine_cs *ring,
-			       struct drm_i915_gem_object *ctx_obj)
+			       struct intel_context *ctx,
+			       struct intel_engine_cs *ring)
 {
 	struct page *page;
 	uint32_t *reg_state;
 	int j;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	unsigned long ggtt_offset = 0;
 
 	if (ctx_obj == NULL) {
@@ -2009,7 +2010,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 	}
 
 	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-		   intel_execlists_ctx_id(ctx_obj));
+		   intel_execlists_ctx_id(ctx, ring));
 
 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
 		seq_puts(m, "\tNot bound in GGTT\n");
@@ -2058,8 +2059,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
 		for_each_ring(ring, dev_priv, i) {
 			if (ring->default_context != ctx)
-				i915_dump_lrc_obj(m, ring,
-						  ctx->engine[i].state);
+				i915_dump_lrc_obj(m, ctx, ring);
 		}
 	}
 
@@ -2133,11 +2133,8 @@ static int i915_execlists(struct seq_file *m, void *data)
 
 	seq_printf(m, "\t%d requests in queue\n", count);
 	if (head_req) {
-		struct drm_i915_gem_object *ctx_obj;
-
-		ctx_obj = head_req->ctx->engine[ring_id].state;
 		seq_printf(m, "\tHead request id: %u\n",
-			   intel_execlists_ctx_id(ctx_obj));
+			   intel_execlists_ctx_id(head_req->ctx, ring));
 		seq_printf(m, "\tHead request tail: %u\n",
 			   head_req->tail);
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eb7bb97f7316..acff98b9c148 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -888,6 +888,8 @@ struct intel_context {
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;
 		int pin_count;
+		struct i915_vma *lrc_vma;
+		u64 lrc_desc;
 	} engine[I915_NUM_RINGS];
 
 	struct list_head link;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b448ad832dcf..e5737963ab79 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -44,7 +44,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
 
 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
-
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
 #define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index f5d89c845ede..86042dc1802c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -263,65 +263,92 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 	return 0;
 }
 
+static void
+logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+					IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+					(ring->id == VCS || ring->id == VCS2);
+
+	ring->ctx_desc_template = GEN8_CTX_VALID;
+	ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+				   GEN8_CTX_ADDRESSING_MODE_SHIFT;
+	if (IS_GEN8(dev))
+		ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+	ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+
+	/* TODO: WaDisableLiteRestore when we start using semaphore
+	 * signalling between Command Streamers */
+	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
+
+	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
+	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
+	if (ring->disable_lite_restore_wa)
+		ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+}
+
 /**
- * intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx_obj: Logical Ring Context backing object.
+ * intel_lr_context_descriptor_update() - calculate & cache the descriptor
+ *					   descriptor for a pinned context
  *
- * Do not confuse with ctx->id! Unfortunately we have a name overload
- * here: the old context ID we pass to userspace as a handler so that
- * they can refer to a context, and the new context ID we pass to the
- * ELSP so that the GPU can inform us of the context status via
- * interrupts.
+ * @ctx: Context to work on
+ * @ring: Engine the descriptor will be used with
  *
- * Return: 20-bits globally unique context ID.
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB:
+ *    bits 0-11:  flags, GEN8_CTX_* (cached in ctx_desc_template)
+ *    bits 12-31: LRCA, GTT address of (the HWSP of) this context
+ *    bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
+ *    bits 52-63: reserved, may encode the engine ID (for GuC)
  */
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
+static void
+intel_lr_context_descriptor_update(struct intel_context *ctx,
+				   struct intel_engine_cs *ring)
 {
-	u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
-			LRC_PPHWSP_PN * PAGE_SIZE;
+	uint64_t lrca, desc;
 
-	/* LRCA is required to be 4K aligned so the more significant 20 bits
-	 * are globally unique */
-	return lrca >> 12;
-}
+	lrca = ctx->engine[ring->id].lrc_vma->node.start +
+	       LRC_PPHWSP_PN * PAGE_SIZE;
 
-static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
-{
-	struct drm_device *dev = ring->dev;
+	desc = ring->ctx_desc_template;			   /* bits 0-11 */
+	desc |= lrca;					   /* bits 12-31 */
+	desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
 
-	return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-		IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-	       (ring->id == VCS || ring->id == VCS2);
+	ctx->engine[ring->id].lrc_desc = desc;
 }
 
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 				     struct intel_engine_cs *ring)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	uint64_t desc;
-	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
-			LRC_PPHWSP_PN * PAGE_SIZE;
-
-	WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
-
-	desc = GEN8_CTX_VALID;
-	desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-	if (IS_GEN8(ctx_obj->base.dev))
-		desc |= GEN8_CTX_L3LLC_COHERENT;
-	desc |= GEN8_CTX_PRIVILEGE;
-	desc |= lrca;
-	desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
-
-	/* TODO: WaDisableLiteRestore when we start using semaphore
-	 * signalling between Command Streamers */
-	/* desc |= GEN8_CTX_FORCE_RESTORE; */
-
-	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
-	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
-	if (disable_lite_restore_wa(ring))
-		desc |= GEN8_CTX_FORCE_RESTORE;
+	return ctx->engine[ring->id].lrc_desc;
+}
 
-	return desc;
+/**
+ * intel_execlists_ctx_id() - get the Execlists Context ID
+ * @ctx: Context to get the ID for
+ * @ring: Engine to get the ID for
+ *
+ * Do not confuse with ctx->id! Unfortunately we have a name overload
+ * here: the old context ID we pass to userspace as a handler so that
+ * they can refer to a context, and the new context ID we pass to the
+ * ELSP so that the GPU can inform us of the context status via
+ * interrupts.
+ *
+ * The context ID is a portion of the context descriptor, so we can
+ * just extract the required part from the cached descriptor.
+ *
+ * Return: 20-bits globally unique context ID.
+ */
+u32 intel_execlists_ctx_id(struct intel_context *ctx,
+			   struct intel_engine_cs *ring)
+{
+	return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
 }
 
 static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
@@ -369,8 +396,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 	uint32_t *reg_state;
 
 	BUG_ON(!ctx_obj);
-	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
-	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
 	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
 	reg_state = kmap_atomic(page);
@@ -477,9 +502,7 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
 					    execlist_link);
 
 	if (head_req != NULL) {
-		struct drm_i915_gem_object *ctx_obj =
-				head_req->ctx->engine[ring->id].state;
-		if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+		if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
 			WARN(head_req->elsp_submitted == 0,
 			     "Never submitted head request\n");
 
@@ -556,7 +579,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 		}
 	}
 
-	if (disable_lite_restore_wa(ring)) {
+	if (ring->disable_lite_restore_wa) {
 		/* Prevent a ctx to preempt itself */
 		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
 		    (submit_contexts != 0))
@@ -1039,14 +1062,16 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 }
 
 static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
-				   struct drm_i915_gem_object *ctx_obj,
-				   struct intel_ringbuffer *ringbuf)
+				   struct intel_context *ctx)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	int ret;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
 	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
 			PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 	if (ret)
@@ -1056,6 +1081,8 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
 	if (ret)
 		goto unpin_ctx_obj;
 
+	ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+	intel_lr_context_descriptor_update(ctx, ring);
 	ctx_obj->dirty = true;
 
 	/* Invalidate GuC TLB. */
@@ -1074,11 +1101,9 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
 	int ret = 0;
 	struct intel_engine_cs *ring = rq->ring;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
 	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
-		ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
+		ret = intel_lr_context_do_pin(ring, rq->ctx);
 		if (ret)
 			goto reset_pin_count;
 	}
@@ -1100,6 +1125,8 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 		if (--rq->ctx->engine[ring->id].pin_count == 0) {
 			intel_unpin_ringbuffer_obj(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
+			rq->ctx->engine[ring->id].lrc_vma = NULL;
+			rq->ctx->engine[ring->id].lrc_desc = 0;
 		}
 	}
 }
@@ -1939,6 +1966,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 		ring->status_page.obj = NULL;
 	}
 
+	ring->disable_lite_restore_wa = false;
+	ring->ctx_desc_template = 0;
+
 	lrc_destroy_wa_ctx_obj(ring);
 	ring->dev = NULL;
 }
@@ -1989,6 +2019,8 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
 	spin_lock_init(&ring->execlist_lock);
 
+	logical_ring_init_platform_invariants(ring);
+
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
 		goto error;
@@ -1998,10 +2030,7 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 		goto error;
 
 	/* As this is the default context, always pin it */
-	ret = intel_lr_context_do_pin(
-			ring,
-			ring->default_context->engine[ring->id].state,
-			ring->default_context->engine[ring->id].ringbuf);
+	ret = intel_lr_context_do_pin(ring, ring->default_context);
 	if (ret) {
 		DRM_ERROR(
 			"Failed to pin and map ringbuffer %s: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index de41ad6cd63d..49af638f6213 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -107,13 +107,15 @@ void intel_lr_context_reset(struct drm_device *dev,
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 				     struct intel_engine_cs *ring);
 
+u32 intel_execlists_ctx_id(struct intel_context *ctx,
+			   struct intel_engine_cs *ring);
+
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
 struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas);
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);
 void intel_execlists_retire_requests(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 7349d9258191..85ce2272f92c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -269,6 +269,8 @@ struct intel_engine_cs {
 	struct list_head execlist_queue;
 	struct list_head execlist_retired_req_list;
 	u8 next_context_status_buffer;
+	bool disable_lite_restore_wa;
+	u32 ctx_desc_template;
 	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
 	int (*emit_request)(struct drm_i915_gem_request *request);
 	int (*emit_flush)(struct drm_i915_gem_request *request,