author     Tvrtko Ursulin <tvrtko.ursulin@intel.com>   2016-03-16 07:00:37 -0400
committer  Tvrtko Ursulin <tvrtko.ursulin@intel.com>   2016-03-16 11:33:10 -0400
commit     0bc40be85f33ca1795253a5f8674efb430f83cce (patch)
tree       6d0546ca8e222bb60fd0fe222f1cd1438ef319fa /drivers/gpu/drm/i915/intel_lrc.c
parent     e2f80391478af71bbbc91686fe0efc580b907caa (diff)
drm/i915: Rename intel_engine_cs function parameters

@@
identifier func;
@@
func(..., struct intel_engine_cs *
- ring
+ engine
, ...)
{
<...
- ring
+ engine
...>
}

@@
identifier func;
type T;
@@
T func(..., struct intel_engine_cs *
- ring
+ engine
, ...);

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
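
The script above is a Coccinelle semantic patch (SmPL): the first rule matches any function definition taking a struct intel_engine_cs * parameter named "ring" and renames the parameter and every use of it in the body to "engine"; the second rule does the same for function declarations. A minimal sketch of the transformation on a hypothetical function (illustrative only, not taken from intel_lrc.c):

	/* Hypothetical example (not from intel_lrc.c) of what the patch rewrites. */
	struct intel_engine_cs {
		int id;		/* stand-in field; the real struct is much larger */
	};

	/* Before the rename: the engine pointer is conventionally called "ring". */
	static int example_id_before(struct intel_engine_cs *ring)
	{
		return ring->id;
	}

	/* After the rename: the parameter and every use of it become "engine". */
	static int example_id_after(struct intel_engine_cs *engine)
	{
		return engine->id;
	}
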
Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')
-rw-r--r--   drivers/gpu/drm/i915/intel_lrc.c   566
1 file changed, 298 insertions(+), 268 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 448c68e69194..25514e91479a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -228,8 +228,8 @@ enum {
228 228
229static int intel_lr_context_pin(struct intel_context *ctx, 229static int intel_lr_context_pin(struct intel_context *ctx,
230 struct intel_engine_cs *engine); 230 struct intel_engine_cs *engine);
231static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, 231static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
232 struct drm_i915_gem_object *default_ctx_obj); 232 struct drm_i915_gem_object *default_ctx_obj);
233 233
234 234
235/** 235/**
@@ -266,23 +266,23 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
266} 266}
267 267
268static void 268static void
269logical_ring_init_platform_invariants(struct intel_engine_cs *ring) 269logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
270{ 270{
271 struct drm_device *dev = ring->dev; 271 struct drm_device *dev = engine->dev;
272 272
273 if (IS_GEN8(dev) || IS_GEN9(dev)) 273 if (IS_GEN8(dev) || IS_GEN9(dev))
274 ring->idle_lite_restore_wa = ~0; 274 engine->idle_lite_restore_wa = ~0;
275 275
276 ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 276 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
277 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && 277 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
278 (ring->id == VCS || ring->id == VCS2); 278 (engine->id == VCS || engine->id == VCS2);
279 279
280 ring->ctx_desc_template = GEN8_CTX_VALID; 280 engine->ctx_desc_template = GEN8_CTX_VALID;
281 ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << 281 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
282 GEN8_CTX_ADDRESSING_MODE_SHIFT; 282 GEN8_CTX_ADDRESSING_MODE_SHIFT;
283 if (IS_GEN8(dev)) 283 if (IS_GEN8(dev))
284 ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; 284 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
285 ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE; 285 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
286 286
287 /* TODO: WaDisableLiteRestore when we start using semaphore 287 /* TODO: WaDisableLiteRestore when we start using semaphore
288 * signalling between Command Streamers */ 288 * signalling between Command Streamers */
@@ -290,8 +290,8 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
290 290
291 /* WaEnableForceRestoreInCtxtDescForVCS:skl */ 291 /* WaEnableForceRestoreInCtxtDescForVCS:skl */
292 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */ 292 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
293 if (ring->disable_lite_restore_wa) 293 if (engine->disable_lite_restore_wa)
294 ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; 294 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
295} 295}
296 296
297/** 297/**
@@ -314,24 +314,24 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
314 */ 314 */
315static void 315static void
316intel_lr_context_descriptor_update(struct intel_context *ctx, 316intel_lr_context_descriptor_update(struct intel_context *ctx,
317 struct intel_engine_cs *ring) 317 struct intel_engine_cs *engine)
318{ 318{
319 uint64_t lrca, desc; 319 uint64_t lrca, desc;
320 320
321 lrca = ctx->engine[ring->id].lrc_vma->node.start + 321 lrca = ctx->engine[engine->id].lrc_vma->node.start +
322 LRC_PPHWSP_PN * PAGE_SIZE; 322 LRC_PPHWSP_PN * PAGE_SIZE;
323 323
324 desc = ring->ctx_desc_template; /* bits 0-11 */ 324 desc = engine->ctx_desc_template; /* bits 0-11 */
325 desc |= lrca; /* bits 12-31 */ 325 desc |= lrca; /* bits 12-31 */
326 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ 326 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
327 327
328 ctx->engine[ring->id].lrc_desc = desc; 328 ctx->engine[engine->id].lrc_desc = desc;
329} 329}
330 330
331uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 331uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
332 struct intel_engine_cs *ring) 332 struct intel_engine_cs *engine)
333{ 333{
334 return ctx->engine[ring->id].lrc_desc; 334 return ctx->engine[engine->id].lrc_desc;
335} 335}
336 336
337/** 337/**
@@ -351,9 +351,9 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
351 * Return: 20-bits globally unique context ID. 351 * Return: 20-bits globally unique context ID.
352 */ 352 */
353u32 intel_execlists_ctx_id(struct intel_context *ctx, 353u32 intel_execlists_ctx_id(struct intel_context *ctx,
354 struct intel_engine_cs *ring) 354 struct intel_engine_cs *engine)
355{ 355{
356 return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT; 356 return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
357} 357}
358 358
359static void execlists_elsp_write(struct drm_i915_gem_request *rq0, 359static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
@@ -424,21 +424,21 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
424 execlists_elsp_write(rq0, rq1); 424 execlists_elsp_write(rq0, rq1);
425} 425}
426 426
427static void execlists_context_unqueue__locked(struct intel_engine_cs *ring) 427static void execlists_context_unqueue__locked(struct intel_engine_cs *engine)
428{ 428{
429 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; 429 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
430 struct drm_i915_gem_request *cursor, *tmp; 430 struct drm_i915_gem_request *cursor, *tmp;
431 431
432 assert_spin_locked(&ring->execlist_lock); 432 assert_spin_locked(&engine->execlist_lock);
433 433
434 /* 434 /*
435 * If irqs are not active generate a warning as batches that finish 435 * If irqs are not active generate a warning as batches that finish
436 * without the irqs may get lost and a GPU Hang may occur. 436 * without the irqs may get lost and a GPU Hang may occur.
437 */ 437 */
438 WARN_ON(!intel_irqs_enabled(ring->dev->dev_private)); 438 WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
439 439
440 /* Try to read in pairs */ 440 /* Try to read in pairs */
441 list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue, 441 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
442 execlist_link) { 442 execlist_link) {
443 if (!req0) { 443 if (!req0) {
444 req0 = cursor; 444 req0 = cursor;
@@ -447,7 +447,7 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
447 * will update tail past first request's workload */ 447 * will update tail past first request's workload */
448 cursor->elsp_submitted = req0->elsp_submitted; 448 cursor->elsp_submitted = req0->elsp_submitted;
449 list_move_tail(&req0->execlist_link, 449 list_move_tail(&req0->execlist_link,
450 &ring->execlist_retired_req_list); 450 &engine->execlist_retired_req_list);
451 req0 = cursor; 451 req0 = cursor;
452 } else { 452 } else {
453 req1 = cursor; 453 req1 = cursor;
@@ -459,7 +459,7 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
459 if (unlikely(!req0)) 459 if (unlikely(!req0))
460 return; 460 return;
461 461
462 if (req0->elsp_submitted & ring->idle_lite_restore_wa) { 462 if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
463 /* 463 /*
464 * WaIdleLiteRestore: make sure we never cause a lite restore 464 * WaIdleLiteRestore: make sure we never cause a lite restore
465 * with HEAD==TAIL. 465 * with HEAD==TAIL.
@@ -470,7 +470,7 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
470 */ 470 */
471 struct intel_ringbuffer *ringbuf; 471 struct intel_ringbuffer *ringbuf;
472 472
473 ringbuf = req0->ctx->engine[ring->id].ringbuf; 473 ringbuf = req0->ctx->engine[engine->id].ringbuf;
474 req0->tail += 8; 474 req0->tail += 8;
475 req0->tail &= ringbuf->size - 1; 475 req0->tail &= ringbuf->size - 1;
476 } 476 }
@@ -478,34 +478,34 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
478 execlists_submit_requests(req0, req1); 478 execlists_submit_requests(req0, req1);
479} 479}
480 480
481static void execlists_context_unqueue(struct intel_engine_cs *ring) 481static void execlists_context_unqueue(struct intel_engine_cs *engine)
482{ 482{
483 struct drm_i915_private *dev_priv = ring->dev->dev_private; 483 struct drm_i915_private *dev_priv = engine->dev->dev_private;
484 484
485 spin_lock(&dev_priv->uncore.lock); 485 spin_lock(&dev_priv->uncore.lock);
486 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); 486 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
487 487
488 execlists_context_unqueue__locked(ring); 488 execlists_context_unqueue__locked(engine);
489 489
490 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); 490 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
491 spin_unlock(&dev_priv->uncore.lock); 491 spin_unlock(&dev_priv->uncore.lock);
492} 492}
493 493
494static unsigned int 494static unsigned int
495execlists_check_remove_request(struct intel_engine_cs *ring, u32 request_id) 495execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
496{ 496{
497 struct drm_i915_gem_request *head_req; 497 struct drm_i915_gem_request *head_req;
498 498
499 assert_spin_locked(&ring->execlist_lock); 499 assert_spin_locked(&engine->execlist_lock);
500 500
501 head_req = list_first_entry_or_null(&ring->execlist_queue, 501 head_req = list_first_entry_or_null(&engine->execlist_queue,
502 struct drm_i915_gem_request, 502 struct drm_i915_gem_request,
503 execlist_link); 503 execlist_link);
504 504
505 if (!head_req) 505 if (!head_req)
506 return 0; 506 return 0;
507 507
508 if (unlikely(intel_execlists_ctx_id(head_req->ctx, ring) != request_id)) 508 if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
509 return 0; 509 return 0;
510 510
511 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); 511 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
@@ -514,26 +514,26 @@ execlists_check_remove_request(struct intel_engine_cs *ring, u32 request_id)
514 return 0; 514 return 0;
515 515
516 list_move_tail(&head_req->execlist_link, 516 list_move_tail(&head_req->execlist_link,
517 &ring->execlist_retired_req_list); 517 &engine->execlist_retired_req_list);
518 518
519 return 1; 519 return 1;
520} 520}
521 521
522static u32 522static u32
523get_context_status(struct intel_engine_cs *ring, unsigned int read_pointer, 523get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
524 u32 *context_id) 524 u32 *context_id)
525{ 525{
526 struct drm_i915_private *dev_priv = ring->dev->dev_private; 526 struct drm_i915_private *dev_priv = engine->dev->dev_private;
527 u32 status; 527 u32 status;
528 528
529 read_pointer %= GEN8_CSB_ENTRIES; 529 read_pointer %= GEN8_CSB_ENTRIES;
530 530
531 status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer)); 531 status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
532 532
533 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) 533 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
534 return 0; 534 return 0;
535 535
536 *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(ring, 536 *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
537 read_pointer)); 537 read_pointer));
538 538
539 return status; 539 return status;
@@ -546,33 +546,34 @@ get_context_status(struct intel_engine_cs *ring, unsigned int read_pointer,
546 * Check the unread Context Status Buffers and manage the submission of new 546 * Check the unread Context Status Buffers and manage the submission of new
547 * contexts to the ELSP accordingly. 547 * contexts to the ELSP accordingly.
548 */ 548 */
549void intel_lrc_irq_handler(struct intel_engine_cs *ring) 549void intel_lrc_irq_handler(struct intel_engine_cs *engine)
550{ 550{
551 struct drm_i915_private *dev_priv = ring->dev->dev_private; 551 struct drm_i915_private *dev_priv = engine->dev->dev_private;
552 u32 status_pointer; 552 u32 status_pointer;
553 unsigned int read_pointer, write_pointer; 553 unsigned int read_pointer, write_pointer;
554 u32 status = 0; 554 u32 status = 0;
555 u32 status_id; 555 u32 status_id;
556 unsigned int submit_contexts = 0; 556 unsigned int submit_contexts = 0;
557 557
558 spin_lock(&ring->execlist_lock); 558 spin_lock(&engine->execlist_lock);
559 559
560 spin_lock(&dev_priv->uncore.lock); 560 spin_lock(&dev_priv->uncore.lock);
561 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); 561 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
562 562
563 status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(ring)); 563 status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
564 564
565 read_pointer = ring->next_context_status_buffer; 565 read_pointer = engine->next_context_status_buffer;
566 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); 566 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
567 if (read_pointer > write_pointer) 567 if (read_pointer > write_pointer)
568 write_pointer += GEN8_CSB_ENTRIES; 568 write_pointer += GEN8_CSB_ENTRIES;
569 569
570 while (read_pointer < write_pointer) { 570 while (read_pointer < write_pointer) {
571 status = get_context_status(ring, ++read_pointer, &status_id); 571 status = get_context_status(engine, ++read_pointer,
572 &status_id);
572 573
573 if (unlikely(status & GEN8_CTX_STATUS_PREEMPTED)) { 574 if (unlikely(status & GEN8_CTX_STATUS_PREEMPTED)) {
574 if (status & GEN8_CTX_STATUS_LITE_RESTORE) { 575 if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
575 if (execlists_check_remove_request(ring, status_id)) 576 if (execlists_check_remove_request(engine, status_id))
576 WARN(1, "Lite Restored request removed from queue\n"); 577 WARN(1, "Lite Restored request removed from queue\n");
577 } else 578 } else
578 WARN(1, "Preemption without Lite Restore\n"); 579 WARN(1, "Preemption without Lite Restore\n");
@@ -581,27 +582,28 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
581 if (status & (GEN8_CTX_STATUS_ACTIVE_IDLE | 582 if (status & (GEN8_CTX_STATUS_ACTIVE_IDLE |
582 GEN8_CTX_STATUS_ELEMENT_SWITCH)) 583 GEN8_CTX_STATUS_ELEMENT_SWITCH))
583 submit_contexts += 584 submit_contexts +=
584 execlists_check_remove_request(ring, status_id); 585 execlists_check_remove_request(engine,
586 status_id);
585 } 587 }
586 588
587 if (submit_contexts) { 589 if (submit_contexts) {
588 if (!ring->disable_lite_restore_wa || 590 if (!engine->disable_lite_restore_wa ||
589 (status & GEN8_CTX_STATUS_ACTIVE_IDLE)) 591 (status & GEN8_CTX_STATUS_ACTIVE_IDLE))
590 execlists_context_unqueue__locked(ring); 592 execlists_context_unqueue__locked(engine);
591 } 593 }
592 594
593 ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; 595 engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
594 596
595 /* Update the read pointer to the old write pointer. Manual ringbuffer 597 /* Update the read pointer to the old write pointer. Manual ringbuffer
596 * management ftw </sarcasm> */ 598 * management ftw </sarcasm> */
597 I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(ring), 599 I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
598 _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, 600 _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
599 ring->next_context_status_buffer << 8)); 601 engine->next_context_status_buffer << 8));
600 602
601 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); 603 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
602 spin_unlock(&dev_priv->uncore.lock); 604 spin_unlock(&dev_priv->uncore.lock);
603 605
604 spin_unlock(&ring->execlist_lock); 606 spin_unlock(&engine->execlist_lock);
605 607
606 if (unlikely(submit_contexts > 2)) 608 if (unlikely(submit_contexts > 2))
607 DRM_ERROR("More than two context complete events?\n"); 609 DRM_ERROR("More than two context complete events?\n");
@@ -1020,53 +1022,53 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
1020 return 0; 1022 return 0;
1021} 1023}
1022 1024
1023void intel_execlists_retire_requests(struct intel_engine_cs *ring) 1025void intel_execlists_retire_requests(struct intel_engine_cs *engine)
1024{ 1026{
1025 struct drm_i915_gem_request *req, *tmp; 1027 struct drm_i915_gem_request *req, *tmp;
1026 struct list_head retired_list; 1028 struct list_head retired_list;
1027 1029
1028 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); 1030 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
1029 if (list_empty(&ring->execlist_retired_req_list)) 1031 if (list_empty(&engine->execlist_retired_req_list))
1030 return; 1032 return;
1031 1033
1032 INIT_LIST_HEAD(&retired_list); 1034 INIT_LIST_HEAD(&retired_list);
1033 spin_lock_irq(&ring->execlist_lock); 1035 spin_lock_irq(&engine->execlist_lock);
1034 list_replace_init(&ring->execlist_retired_req_list, &retired_list); 1036 list_replace_init(&engine->execlist_retired_req_list, &retired_list);
1035 spin_unlock_irq(&ring->execlist_lock); 1037 spin_unlock_irq(&engine->execlist_lock);
1036 1038
1037 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { 1039 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
1038 struct intel_context *ctx = req->ctx; 1040 struct intel_context *ctx = req->ctx;
1039 struct drm_i915_gem_object *ctx_obj = 1041 struct drm_i915_gem_object *ctx_obj =
1040 ctx->engine[ring->id].state; 1042 ctx->engine[engine->id].state;
1041 1043
1042 if (ctx_obj && (ctx != req->i915->kernel_context)) 1044 if (ctx_obj && (ctx != req->i915->kernel_context))
1043 intel_lr_context_unpin(ctx, ring); 1045 intel_lr_context_unpin(ctx, engine);
1044 1046
1045 list_del(&req->execlist_link); 1047 list_del(&req->execlist_link);
1046 i915_gem_request_unreference(req); 1048 i915_gem_request_unreference(req);
1047 } 1049 }
1048} 1050}
1049 1051
1050void intel_logical_ring_stop(struct intel_engine_cs *ring) 1052void intel_logical_ring_stop(struct intel_engine_cs *engine)
1051{ 1053{
1052 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1054 struct drm_i915_private *dev_priv = engine->dev->dev_private;
1053 int ret; 1055 int ret;
1054 1056
1055 if (!intel_ring_initialized(ring)) 1057 if (!intel_ring_initialized(engine))
1056 return; 1058 return;
1057 1059
1058 ret = intel_ring_idle(ring); 1060 ret = intel_ring_idle(engine);
1059 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error)) 1061 if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
1060 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 1062 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1061 ring->name, ret); 1063 engine->name, ret);
1062 1064
1063 /* TODO: Is this correct with Execlists enabled? */ 1065 /* TODO: Is this correct with Execlists enabled? */
1064 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); 1066 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
1065 if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { 1067 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
1066 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); 1068 DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
1067 return; 1069 return;
1068 } 1070 }
1069 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); 1071 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
1070} 1072}
1071 1073
1072int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) 1074int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
@@ -1086,17 +1088,17 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
1086} 1088}
1087 1089
1088static int intel_lr_context_do_pin(struct intel_context *ctx, 1090static int intel_lr_context_do_pin(struct intel_context *ctx,
1089 struct intel_engine_cs *ring) 1091 struct intel_engine_cs *engine)
1090{ 1092{
1091 struct drm_device *dev = ring->dev; 1093 struct drm_device *dev = engine->dev;
1092 struct drm_i915_private *dev_priv = dev->dev_private; 1094 struct drm_i915_private *dev_priv = dev->dev_private;
1093 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; 1095 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
1094 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; 1096 struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
1095 struct page *lrc_state_page; 1097 struct page *lrc_state_page;
1096 uint32_t *lrc_reg_state; 1098 uint32_t *lrc_reg_state;
1097 int ret; 1099 int ret;
1098 1100
1099 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); 1101 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
1100 1102
1101 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 1103 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
1102 PIN_OFFSET_BIAS | GUC_WOPCM_TOP); 1104 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
@@ -1109,15 +1111,15 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
1109 goto unpin_ctx_obj; 1111 goto unpin_ctx_obj;
1110 } 1112 }
1111 1113
1112 ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); 1114 ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
1113 if (ret) 1115 if (ret)
1114 goto unpin_ctx_obj; 1116 goto unpin_ctx_obj;
1115 1117
1116 ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); 1118 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
1117 intel_lr_context_descriptor_update(ctx, ring); 1119 intel_lr_context_descriptor_update(ctx, engine);
1118 lrc_reg_state = kmap(lrc_state_page); 1120 lrc_reg_state = kmap(lrc_state_page);
1119 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; 1121 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
1120 ctx->engine[ring->id].lrc_reg_state = lrc_reg_state; 1122 ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
1121 ctx_obj->dirty = true; 1123 ctx_obj->dirty = true;
1122 1124
1123 /* Invalidate GuC TLB. */ 1125 /* Invalidate GuC TLB. */
@@ -1235,7 +1237,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1235 * This WA is also required for Gen9 so extracting as a function avoids 1237 * This WA is also required for Gen9 so extracting as a function avoids
1236 * code duplication. 1238 * code duplication.
1237 */ 1239 */
1238static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, 1240static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
1239 uint32_t *const batch, 1241 uint32_t *const batch,
1240 uint32_t index) 1242 uint32_t index)
1241{ 1243{
@@ -1247,13 +1249,13 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
1247 * this batch updates GEN8_L3SQCREG4 with default value we need to 1249 * this batch updates GEN8_L3SQCREG4 with default value we need to
1248 * set this bit here to retain the WA during flush. 1250 * set this bit here to retain the WA during flush.
1249 */ 1251 */
1250 if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0)) 1252 if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
1251 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 1253 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1252 1254
1253 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 1255 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
1254 MI_SRM_LRM_GLOBAL_GTT)); 1256 MI_SRM_LRM_GLOBAL_GTT));
1255 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 1257 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1256 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); 1258 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
1257 wa_ctx_emit(batch, index, 0); 1259 wa_ctx_emit(batch, index, 0);
1258 1260
1259 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1261 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -1271,7 +1273,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
1271 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | 1273 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
1272 MI_SRM_LRM_GLOBAL_GTT)); 1274 MI_SRM_LRM_GLOBAL_GTT));
1273 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 1275 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1274 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); 1276 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
1275 wa_ctx_emit(batch, index, 0); 1277 wa_ctx_emit(batch, index, 0);
1276 1278
1277 return index; 1279 return index;
@@ -1324,7 +1326,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1324 * Return: non-zero if we exceed the PAGE_SIZE limit. 1326 * Return: non-zero if we exceed the PAGE_SIZE limit.
1325 */ 1327 */
1326 1328
1327static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring, 1329static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1328 struct i915_wa_ctx_bb *wa_ctx, 1330 struct i915_wa_ctx_bb *wa_ctx,
1329 uint32_t *const batch, 1331 uint32_t *const batch,
1330 uint32_t *offset) 1332 uint32_t *offset)
@@ -1336,8 +1338,8 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
1336 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1338 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1337 1339
1338 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1340 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1339 if (IS_BROADWELL(ring->dev)) { 1341 if (IS_BROADWELL(engine->dev)) {
1340 int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index); 1342 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1341 if (rc < 0) 1343 if (rc < 0)
1342 return rc; 1344 return rc;
1343 index = rc; 1345 index = rc;
@@ -1345,7 +1347,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
1345 1347
1346 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ 1348 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1347 /* Actual scratch location is at 128 bytes offset */ 1349 /* Actual scratch location is at 128 bytes offset */
1348 scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES; 1350 scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
1349 1351
1350 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 1352 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1351 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | 1353 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1387,7 +1389,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
1387 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding 1389 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1388 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant. 1390 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
1389 */ 1391 */
1390static int gen8_init_perctx_bb(struct intel_engine_cs *ring, 1392static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
1391 struct i915_wa_ctx_bb *wa_ctx, 1393 struct i915_wa_ctx_bb *wa_ctx,
1392 uint32_t *const batch, 1394 uint32_t *const batch,
1393 uint32_t *offset) 1395 uint32_t *offset)
@@ -1402,13 +1404,13 @@ static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
1402 return wa_ctx_end(wa_ctx, *offset = index, 1); 1404 return wa_ctx_end(wa_ctx, *offset = index, 1);
1403} 1405}
1404 1406
1405static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring, 1407static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1406 struct i915_wa_ctx_bb *wa_ctx, 1408 struct i915_wa_ctx_bb *wa_ctx,
1407 uint32_t *const batch, 1409 uint32_t *const batch,
1408 uint32_t *offset) 1410 uint32_t *offset)
1409{ 1411{
1410 int ret; 1412 int ret;
1411 struct drm_device *dev = ring->dev; 1413 struct drm_device *dev = engine->dev;
1412 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1414 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1413 1415
1414 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1416 /* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1417,7 +1419,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
1417 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1419 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1418 1420
1419 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ 1421 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
1420 ret = gen8_emit_flush_coherentl3_wa(ring, batch, index); 1422 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1421 if (ret < 0) 1423 if (ret < 0)
1422 return ret; 1424 return ret;
1423 index = ret; 1425 index = ret;
@@ -1429,12 +1431,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
1429 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); 1431 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1430} 1432}
1431 1433
1432static int gen9_init_perctx_bb(struct intel_engine_cs *ring, 1434static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1433 struct i915_wa_ctx_bb *wa_ctx, 1435 struct i915_wa_ctx_bb *wa_ctx,
1434 uint32_t *const batch, 1436 uint32_t *const batch,
1435 uint32_t *offset) 1437 uint32_t *offset)
1436{ 1438{
1437 struct drm_device *dev = ring->dev; 1439 struct drm_device *dev = engine->dev;
1438 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1440 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1439 1441
1440 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 1442 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
@@ -1457,60 +1459,61 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
1457 return wa_ctx_end(wa_ctx, *offset = index, 1); 1459 return wa_ctx_end(wa_ctx, *offset = index, 1);
1458} 1460}
1459 1461
1460static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size) 1462static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1461{ 1463{
1462 int ret; 1464 int ret;
1463 1465
1464 ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size)); 1466 engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
1465 if (!ring->wa_ctx.obj) { 1467 PAGE_ALIGN(size));
1468 if (!engine->wa_ctx.obj) {
1466 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); 1469 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1467 return -ENOMEM; 1470 return -ENOMEM;
1468 } 1471 }
1469 1472
1470 ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0); 1473 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
1471 if (ret) { 1474 if (ret) {
1472 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n", 1475 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1473 ret); 1476 ret);
1474 drm_gem_object_unreference(&ring->wa_ctx.obj->base); 1477 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1475 return ret; 1478 return ret;
1476 } 1479 }
1477 1480
1478 return 0; 1481 return 0;
1479} 1482}
1480 1483
1481static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring) 1484static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
1482{ 1485{
1483 if (ring->wa_ctx.obj) { 1486 if (engine->wa_ctx.obj) {
1484 i915_gem_object_ggtt_unpin(ring->wa_ctx.obj); 1487 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
1485 drm_gem_object_unreference(&ring->wa_ctx.obj->base); 1488 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1486 ring->wa_ctx.obj = NULL; 1489 engine->wa_ctx.obj = NULL;
1487 } 1490 }
1488} 1491}
1489 1492
1490static int intel_init_workaround_bb(struct intel_engine_cs *ring) 1493static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1491{ 1494{
1492 int ret; 1495 int ret;
1493 uint32_t *batch; 1496 uint32_t *batch;
1494 uint32_t offset; 1497 uint32_t offset;
1495 struct page *page; 1498 struct page *page;
1496 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; 1499 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1497 1500
1498 WARN_ON(ring->id != RCS); 1501 WARN_ON(engine->id != RCS);
1499 1502
1500 /* update this when WA for higher Gen are added */ 1503 /* update this when WA for higher Gen are added */
1501 if (INTEL_INFO(ring->dev)->gen > 9) { 1504 if (INTEL_INFO(engine->dev)->gen > 9) {
1502 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", 1505 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1503 INTEL_INFO(ring->dev)->gen); 1506 INTEL_INFO(engine->dev)->gen);
1504 return 0; 1507 return 0;
1505 } 1508 }
1506 1509
1507 /* some WA perform writes to scratch page, ensure it is valid */ 1510 /* some WA perform writes to scratch page, ensure it is valid */
1508 if (ring->scratch.obj == NULL) { 1511 if (engine->scratch.obj == NULL) {
1509 DRM_ERROR("scratch page not allocated for %s\n", ring->name); 1512 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
1510 return -EINVAL; 1513 return -EINVAL;
1511 } 1514 }
1512 1515
1513 ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE); 1516 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
1514 if (ret) { 1517 if (ret) {
1515 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); 1518 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1516 return ret; 1519 return ret;
@@ -1520,29 +1523,29 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
1520 batch = kmap_atomic(page); 1523 batch = kmap_atomic(page);
1521 offset = 0; 1524 offset = 0;
1522 1525
1523 if (INTEL_INFO(ring->dev)->gen == 8) { 1526 if (INTEL_INFO(engine->dev)->gen == 8) {
1524 ret = gen8_init_indirectctx_bb(ring, 1527 ret = gen8_init_indirectctx_bb(engine,
1525 &wa_ctx->indirect_ctx, 1528 &wa_ctx->indirect_ctx,
1526 batch, 1529 batch,
1527 &offset); 1530 &offset);
1528 if (ret) 1531 if (ret)
1529 goto out; 1532 goto out;
1530 1533
1531 ret = gen8_init_perctx_bb(ring, 1534 ret = gen8_init_perctx_bb(engine,
1532 &wa_ctx->per_ctx, 1535 &wa_ctx->per_ctx,
1533 batch, 1536 batch,
1534 &offset); 1537 &offset);
1535 if (ret) 1538 if (ret)
1536 goto out; 1539 goto out;
1537 } else if (INTEL_INFO(ring->dev)->gen == 9) { 1540 } else if (INTEL_INFO(engine->dev)->gen == 9) {
1538 ret = gen9_init_indirectctx_bb(ring, 1541 ret = gen9_init_indirectctx_bb(engine,
1539 &wa_ctx->indirect_ctx, 1542 &wa_ctx->indirect_ctx,
1540 batch, 1543 batch,
1541 &offset); 1544 &offset);
1542 if (ret) 1545 if (ret)
1543 goto out; 1546 goto out;
1544 1547
1545 ret = gen9_init_perctx_bb(ring, 1548 ret = gen9_init_perctx_bb(engine,
1546 &wa_ctx->per_ctx, 1549 &wa_ctx->per_ctx,
1547 batch, 1550 batch,
1548 &offset); 1551 &offset);
@@ -1553,27 +1556,28 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
1553out: 1556out:
1554 kunmap_atomic(batch); 1557 kunmap_atomic(batch);
1555 if (ret) 1558 if (ret)
1556 lrc_destroy_wa_ctx_obj(ring); 1559 lrc_destroy_wa_ctx_obj(engine);
1557 1560
1558 return ret; 1561 return ret;
1559} 1562}
1560 1563
1561static int gen8_init_common_ring(struct intel_engine_cs *ring) 1564static int gen8_init_common_ring(struct intel_engine_cs *engine)
1562{ 1565{
1563 struct drm_device *dev = ring->dev; 1566 struct drm_device *dev = engine->dev;
1564 struct drm_i915_private *dev_priv = dev->dev_private; 1567 struct drm_i915_private *dev_priv = dev->dev_private;
1565 unsigned int next_context_status_buffer_hw; 1568 unsigned int next_context_status_buffer_hw;
1566 1569
1567 lrc_setup_hardware_status_page(ring, 1570 lrc_setup_hardware_status_page(engine,
1568 dev_priv->kernel_context->engine[ring->id].state); 1571 dev_priv->kernel_context->engine[engine->id].state);
1569 1572
1570 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); 1573 I915_WRITE_IMR(engine,
1571 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); 1574 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1575 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1572 1576
1573 I915_WRITE(RING_MODE_GEN7(ring), 1577 I915_WRITE(RING_MODE_GEN7(engine),
1574 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | 1578 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1575 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 1579 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1576 POSTING_READ(RING_MODE_GEN7(ring)); 1580 POSTING_READ(RING_MODE_GEN7(engine));
1577 1581
1578 /* 1582 /*
1579 * Instead of resetting the Context Status Buffer (CSB) read pointer to 1583 * Instead of resetting the Context Status Buffer (CSB) read pointer to
@@ -1588,7 +1592,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
1588 * BXT | ? | ? | 1592 * BXT | ? | ? |
1589 */ 1593 */
1590 next_context_status_buffer_hw = 1594 next_context_status_buffer_hw =
1591 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring))); 1595 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
1592 1596
1593 /* 1597 /*
1594 * When the CSB registers are reset (also after power-up / gpu reset), 1598 * When the CSB registers are reset (also after power-up / gpu reset),
@@ -1598,21 +1602,21 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
1598 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) 1602 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1599 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); 1603 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1600 1604
1601 ring->next_context_status_buffer = next_context_status_buffer_hw; 1605 engine->next_context_status_buffer = next_context_status_buffer_hw;
1602 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); 1606 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1603 1607
1604 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 1608 memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
1605 1609
1606 return 0; 1610 return 0;
1607} 1611}
1608 1612
1609static int gen8_init_render_ring(struct intel_engine_cs *ring) 1613static int gen8_init_render_ring(struct intel_engine_cs *engine)
1610{ 1614{
1611 struct drm_device *dev = ring->dev; 1615 struct drm_device *dev = engine->dev;
1612 struct drm_i915_private *dev_priv = dev->dev_private; 1616 struct drm_i915_private *dev_priv = dev->dev_private;
1613 int ret; 1617 int ret;
1614 1618
1615 ret = gen8_init_common_ring(ring); 1619 ret = gen8_init_common_ring(engine);
1616 if (ret) 1620 if (ret)
1617 return ret; 1621 return ret;
1618 1622
@@ -1626,18 +1630,18 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
1626 1630
1627 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1631 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1628 1632
1629 return init_workarounds_ring(ring); 1633 return init_workarounds_ring(engine);
1630} 1634}
1631 1635
1632static int gen9_init_render_ring(struct intel_engine_cs *ring) 1636static int gen9_init_render_ring(struct intel_engine_cs *engine)
1633{ 1637{
1634 int ret; 1638 int ret;
1635 1639
1636 ret = gen8_init_common_ring(ring); 1640 ret = gen8_init_common_ring(engine);
1637 if (ret) 1641 if (ret)
1638 return ret; 1642 return ret;
1639 1643
1640 return init_workarounds_ring(ring); 1644 return init_workarounds_ring(engine);
1641} 1645}
1642 1646
1643static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1647static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -1712,9 +1716,9 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1712 return 0; 1716 return 0;
1713} 1717}
1714 1718
1715static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring) 1719static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1716{ 1720{
1717 struct drm_device *dev = ring->dev; 1721 struct drm_device *dev = engine->dev;
1718 struct drm_i915_private *dev_priv = dev->dev_private; 1722 struct drm_i915_private *dev_priv = dev->dev_private;
1719 unsigned long flags; 1723 unsigned long flags;
1720 1724
@@ -1722,25 +1726,26 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
1722 return false; 1726 return false;
1723 1727
1724 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1728 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1725 if (ring->irq_refcount++ == 0) { 1729 if (engine->irq_refcount++ == 0) {
1726 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); 1730 I915_WRITE_IMR(engine,
1727 POSTING_READ(RING_IMR(ring->mmio_base)); 1731 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1732 POSTING_READ(RING_IMR(engine->mmio_base));
1728 } 1733 }
1729 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1734 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1730 1735
1731 return true; 1736 return true;
1732} 1737}
1733 1738
1734static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring) 1739static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
1735{ 1740{
1736 struct drm_device *dev = ring->dev; 1741 struct drm_device *dev = engine->dev;
1737 struct drm_i915_private *dev_priv = dev->dev_private; 1742 struct drm_i915_private *dev_priv = dev->dev_private;
1738 unsigned long flags; 1743 unsigned long flags;
1739 1744
1740 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1745 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1741 if (--ring->irq_refcount == 0) { 1746 if (--engine->irq_refcount == 0) {
1742 I915_WRITE_IMR(ring, ~ring->irq_keep_mask); 1747 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1743 POSTING_READ(RING_IMR(ring->mmio_base)); 1748 POSTING_READ(RING_IMR(engine->mmio_base));
1744 } 1749 }
1745 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1750 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1746} 1751}
@@ -1848,17 +1853,18 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1848 return 0; 1853 return 0;
1849} 1854}
1850 1855
1851static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 1856static u32 gen8_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
1852{ 1857{
1853 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 1858 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1854} 1859}
1855 1860
1856static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno) 1861static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1857{ 1862{
1858 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); 1863 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1859} 1864}
1860 1865
1861static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 1866static u32 bxt_a_get_seqno(struct intel_engine_cs *engine,
1867 bool lazy_coherency)
1862{ 1868{
1863 1869
1864 /* 1870 /*
@@ -1873,17 +1879,17 @@ static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1873 */ 1879 */
1874 1880
1875 if (!lazy_coherency) 1881 if (!lazy_coherency)
1876 intel_flush_status_page(ring, I915_GEM_HWS_INDEX); 1882 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1877 1883
1878 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 1884 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1879} 1885}
1880 1886
1881static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno) 1887static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1882{ 1888{
1883 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); 1889 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1884 1890
1885 /* See bxt_a_get_seqno() explaining the reason for the clflush. */ 1891 /* See bxt_a_get_seqno() explaining the reason for the clflush. */
1886 intel_flush_status_page(ring, I915_GEM_HWS_INDEX); 1892 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1887} 1893}
1888 1894
1889/* 1895/*
@@ -2002,109 +2008,109 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
2002 * @ring: Engine Command Streamer. 2008 * @ring: Engine Command Streamer.
2003 * 2009 *
2004 */ 2010 */
2005void intel_logical_ring_cleanup(struct intel_engine_cs *ring) 2011void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
2006{ 2012{
2007 struct drm_i915_private *dev_priv; 2013 struct drm_i915_private *dev_priv;
2008 2014
2009 if (!intel_ring_initialized(ring)) 2015 if (!intel_ring_initialized(engine))
2010 return; 2016 return;
2011 2017
2012 dev_priv = ring->dev->dev_private; 2018 dev_priv = engine->dev->dev_private;
2013 2019
2014 if (ring->buffer) { 2020 if (engine->buffer) {
2015 intel_logical_ring_stop(ring); 2021 intel_logical_ring_stop(engine);
2016 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); 2022 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
2017 } 2023 }
2018 2024
2019 if (ring->cleanup) 2025 if (engine->cleanup)
2020 ring->cleanup(ring); 2026 engine->cleanup(engine);
2021 2027
2022 i915_cmd_parser_fini_ring(ring); 2028 i915_cmd_parser_fini_ring(engine);
2023 i915_gem_batch_pool_fini(&ring->batch_pool); 2029 i915_gem_batch_pool_fini(&engine->batch_pool);
2024 2030
2025 if (ring->status_page.obj) { 2031 if (engine->status_page.obj) {
2026 kunmap(sg_page(ring->status_page.obj->pages->sgl)); 2032 kunmap(sg_page(engine->status_page.obj->pages->sgl));
2027 ring->status_page.obj = NULL; 2033 engine->status_page.obj = NULL;
2028 } 2034 }
2029 2035
2030 ring->idle_lite_restore_wa = 0; 2036 engine->idle_lite_restore_wa = 0;
2031 ring->disable_lite_restore_wa = false; 2037 engine->disable_lite_restore_wa = false;
2032 ring->ctx_desc_template = 0; 2038 engine->ctx_desc_template = 0;
2033 2039
2034 lrc_destroy_wa_ctx_obj(ring); 2040 lrc_destroy_wa_ctx_obj(engine);
2035 ring->dev = NULL; 2041 engine->dev = NULL;
2036} 2042}
2037 2043
2038static void 2044static void
2039logical_ring_default_vfuncs(struct drm_device *dev, 2045logical_ring_default_vfuncs(struct drm_device *dev,
2040 struct intel_engine_cs *ring) 2046 struct intel_engine_cs *engine)
2041{ 2047{
2042 /* Default vfuncs which can be overriden by each engine. */ 2048 /* Default vfuncs which can be overriden by each engine. */
2043 ring->init_hw = gen8_init_common_ring; 2049 engine->init_hw = gen8_init_common_ring;
2044 ring->emit_request = gen8_emit_request; 2050 engine->emit_request = gen8_emit_request;
2045 ring->emit_flush = gen8_emit_flush; 2051 engine->emit_flush = gen8_emit_flush;
2046 ring->irq_get = gen8_logical_ring_get_irq; 2052 engine->irq_get = gen8_logical_ring_get_irq;
2047 ring->irq_put = gen8_logical_ring_put_irq; 2053 engine->irq_put = gen8_logical_ring_put_irq;
2048 ring->emit_bb_start = gen8_emit_bb_start; 2054 engine->emit_bb_start = gen8_emit_bb_start;
2049 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 2055 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
2050 ring->get_seqno = bxt_a_get_seqno; 2056 engine->get_seqno = bxt_a_get_seqno;
2051 ring->set_seqno = bxt_a_set_seqno; 2057 engine->set_seqno = bxt_a_set_seqno;
2052 } else { 2058 } else {
2053 ring->get_seqno = gen8_get_seqno; 2059 engine->get_seqno = gen8_get_seqno;
2054 ring->set_seqno = gen8_set_seqno; 2060 engine->set_seqno = gen8_set_seqno;
2055 } 2061 }
2056} 2062}
2057 2063
2058static inline void 2064static inline void
2059logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift) 2065logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
2060{ 2066{
2061 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; 2067 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
2062 ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 2068 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
2063} 2069}
2064 2070
2065static int 2071static int
2066logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) 2072logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
2067{ 2073{
2068 struct intel_context *dctx = to_i915(dev)->kernel_context; 2074 struct intel_context *dctx = to_i915(dev)->kernel_context;
2069 int ret; 2075 int ret;
2070 2076
2071 /* Intentionally left blank. */ 2077 /* Intentionally left blank. */
2072 ring->buffer = NULL; 2078 engine->buffer = NULL;
2073 2079
2074 ring->dev = dev; 2080 engine->dev = dev;
2075 INIT_LIST_HEAD(&ring->active_list); 2081 INIT_LIST_HEAD(&engine->active_list);
2076 INIT_LIST_HEAD(&ring->request_list); 2082 INIT_LIST_HEAD(&engine->request_list);
2077 i915_gem_batch_pool_init(dev, &ring->batch_pool); 2083 i915_gem_batch_pool_init(dev, &engine->batch_pool);
2078 init_waitqueue_head(&ring->irq_queue); 2084 init_waitqueue_head(&engine->irq_queue);
2079 2085
2080 INIT_LIST_HEAD(&ring->buffers); 2086 INIT_LIST_HEAD(&engine->buffers);
2081 INIT_LIST_HEAD(&ring->execlist_queue); 2087 INIT_LIST_HEAD(&engine->execlist_queue);
2082 INIT_LIST_HEAD(&ring->execlist_retired_req_list); 2088 INIT_LIST_HEAD(&engine->execlist_retired_req_list);
2083 spin_lock_init(&ring->execlist_lock); 2089 spin_lock_init(&engine->execlist_lock);
2084 2090
2085 logical_ring_init_platform_invariants(ring); 2091 logical_ring_init_platform_invariants(engine);
2086 2092
2087 ret = i915_cmd_parser_init_ring(ring); 2093 ret = i915_cmd_parser_init_ring(engine);
2088 if (ret) 2094 if (ret)
2089 goto error; 2095 goto error;
2090 2096
2091 ret = intel_lr_context_deferred_alloc(dctx, ring); 2097 ret = intel_lr_context_deferred_alloc(dctx, engine);
2092 if (ret) 2098 if (ret)
2093 goto error; 2099 goto error;
2094 2100
2095 /* As this is the default context, always pin it */ 2101 /* As this is the default context, always pin it */
2096 ret = intel_lr_context_do_pin(dctx, ring); 2102 ret = intel_lr_context_do_pin(dctx, engine);
2097 if (ret) { 2103 if (ret) {
2098 DRM_ERROR( 2104 DRM_ERROR(
2099 "Failed to pin and map ringbuffer %s: %d\n", 2105 "Failed to pin and map ringbuffer %s: %d\n",
2100 ring->name, ret); 2106 engine->name, ret);
2101 goto error; 2107 goto error;
2102 } 2108 }
2103 2109
2104 return 0; 2110 return 0;
2105 2111
2106error: 2112error:
2107 intel_logical_ring_cleanup(ring); 2113 intel_logical_ring_cleanup(engine);
2108 return ret; 2114 return ret;
2109} 2115}
2110 2116
@@ -2329,13 +2335,13 @@ make_rpcs(struct drm_device *dev)
2329 return rpcs; 2335 return rpcs;
2330} 2336}
2331 2337
2332static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring) 2338static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2333{ 2339{
2334 u32 indirect_ctx_offset; 2340 u32 indirect_ctx_offset;
2335 2341
2336 switch (INTEL_INFO(ring->dev)->gen) { 2342 switch (INTEL_INFO(engine->dev)->gen) {
2337 default: 2343 default:
2338 MISSING_CASE(INTEL_INFO(ring->dev)->gen); 2344 MISSING_CASE(INTEL_INFO(engine->dev)->gen);
2339 /* fall through */ 2345 /* fall through */
2340 case 9: 2346 case 9:
2341 indirect_ctx_offset = 2347 indirect_ctx_offset =
@@ -2352,9 +2358,10 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
2352 2358
2353static int 2359static int
2354populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj, 2360populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
2355 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) 2361 struct intel_engine_cs *engine,
2362 struct intel_ringbuffer *ringbuf)
2356{ 2363{
2357 struct drm_device *dev = ring->dev; 2364 struct drm_device *dev = engine->dev;
2358 struct drm_i915_private *dev_priv = dev->dev_private; 2365 struct drm_i915_private *dev_priv = dev->dev_private;
2359 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2366 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2360 struct page *page; 2367 struct page *page;
@@ -2389,33 +2396,47 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2389 * recreate this batchbuffer with new values (including all the missing 2396 * recreate this batchbuffer with new values (including all the missing
2390 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */ 2397 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
2391 reg_state[CTX_LRI_HEADER_0] = 2398 reg_state[CTX_LRI_HEADER_0] =
2392 MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED; 2399 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2393 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring), 2400 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
2401 RING_CONTEXT_CONTROL(engine),
2394 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | 2402 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2395 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2403 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2396 (HAS_RESOURCE_STREAMER(dev) ? 2404 (HAS_RESOURCE_STREAMER(dev) ?
2397 CTX_CTRL_RS_CTX_ENABLE : 0))); 2405 CTX_CTRL_RS_CTX_ENABLE : 0)));
2398 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0); 2406 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2399 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0); 2407 0);
2408 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
2409 0);
2400 /* Ring buffer start address is not known until the buffer is pinned. 2410 /* Ring buffer start address is not known until the buffer is pinned.
2401 * It is written to the context image in execlists_update_context() 2411 * It is written to the context image in execlists_update_context()
2402 */ 2412 */
2403 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0); 2413 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
2404 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base), 2414 RING_START(engine->mmio_base), 0);
2415 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
2416 RING_CTL(engine->mmio_base),
2405 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); 2417 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
2406 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0); 2418 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
2407 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0); 2419 RING_BBADDR_UDW(engine->mmio_base), 0);
2408 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base), 2420 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
2421 RING_BBADDR(engine->mmio_base), 0);
2422 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
2423 RING_BBSTATE(engine->mmio_base),
2409 RING_BB_PPGTT); 2424 RING_BB_PPGTT);
2410 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0); 2425 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
2411 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0); 2426 RING_SBBADDR_UDW(engine->mmio_base), 0);
2412 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0); 2427 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
2413 if (ring->id == RCS) { 2428 RING_SBBADDR(engine->mmio_base), 0);
2414 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0); 2429 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
2415 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0); 2430 RING_SBBSTATE(engine->mmio_base), 0);
2416 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0); 2431 if (engine->id == RCS) {
2417 if (ring->wa_ctx.obj) { 2432 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
2418 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; 2433 RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
2434 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
2435 RING_INDIRECT_CTX(engine->mmio_base), 0);
2436 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2437 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
2438 if (engine->wa_ctx.obj) {
2439 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2419 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj); 2440 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2420 2441
2421 reg_state[CTX_RCS_INDIRECT_CTX+1] = 2442 reg_state[CTX_RCS_INDIRECT_CTX+1] =
@@ -2423,7 +2444,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2423 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS); 2444 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2424 2445
2425 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 2446 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
2426 intel_lr_indirect_ctx_offset(ring) << 6; 2447 intel_lr_indirect_ctx_offset(engine) << 6;
2427 2448
2428 reg_state[CTX_BB_PER_CTX_PTR+1] = 2449 reg_state[CTX_BB_PER_CTX_PTR+1] =
2429 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) | 2450 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
@@ -2431,16 +2452,25 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2431 } 2452 }
2432 } 2453 }
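The ASSIGN_CTX_REG() calls above appear to populate the logical ring context image as register/value pairs, which is why later lines poke reg_state[CTX_RCS_INDIRECT_CTX + 1] directly: slot pos is assumed to take the MMIO register offset and slot pos + 1 its initial value. A minimal userspace sketch of that layout follows; the CTX_* index, RING_START() offset and mmio_base below are invented placeholders, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

/* Model of the assumed reg_state layout: register offset at pos, value at pos + 1. */
#define ASSIGN_CTX_REG(state, pos, reg, val) do { \
	(state)[(pos)] = (reg); \
	(state)[(pos) + 1] = (val); \
} while (0)

#define CTX_RING_BUFFER_START	2		/* placeholder index */
#define RING_START(base)	((base) + 0x38)	/* placeholder MMIO offset */

int main(void)
{
	uint32_t reg_state[64] = { 0 };
	uint32_t mmio_base = 0x2000;		/* placeholder engine base */

	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
		       RING_START(mmio_base), 0);
	printf("slot %d: reg 0x%x val 0x%x\n", CTX_RING_BUFFER_START,
	       (unsigned)reg_state[CTX_RING_BUFFER_START],
	       (unsigned)reg_state[CTX_RING_BUFFER_START + 1]);
	return 0;
}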
2433 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED; 2454 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2434 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0); 2455 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2456 RING_CTX_TIMESTAMP(engine->mmio_base), 0);
2435 	/* PDP values will be assigned later if needed */ 2457 	/* PDP values will be assigned later if needed */
2436 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0); 2458 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2437 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0); 2459 0);
2438 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0); 2460 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2439 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0); 2461 0);
2440 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0); 2462 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2441 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0); 2463 0);
2442 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0); 2464 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2443 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0); 2465 0);
2466 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2467 0);
2468 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2469 0);
2470 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2471 0);
2472 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2473 0);
2444 2474
2445 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { 2475 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2446 /* 64b PPGTT (48bit canonical) 2476 /* 64b PPGTT (48bit canonical)
@@ -2457,7 +2487,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2457 execlists_update_context_pdps(ppgtt, reg_state); 2487 execlists_update_context_pdps(ppgtt, reg_state);
2458 } 2488 }
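Each PDPn_UDW/PDPn_LDW pair above reserves two slots for one 64-bit page-directory pointer, split into upper and lower dwords; execlists_update_context_pdps() is what later writes the real addresses. A small stand-alone illustration of that dword split; the slot numbers and address are invented, and the value-at-slot+1 layout is the same assumption as in the sketch further up.

#include <stdint.h>
#include <stdio.h>

/* Store a 64-bit page-directory address as an upper/lower dword pair,
 * mirroring the assumed PDPn_UDW / PDPn_LDW slots (value at slot + 1). */
static void assign_ctx_pdp(uint32_t *reg_state, int udw_slot, int ldw_slot,
			   uint64_t pd_daddr)
{
	reg_state[udw_slot + 1] = (uint32_t)(pd_daddr >> 32);
	reg_state[ldw_slot + 1] = (uint32_t)pd_daddr;
}

int main(void)
{
	uint32_t reg_state[64] = { 0 };

	/* placeholder slots and address */
	assign_ctx_pdp(reg_state, 36, 38, 0x123456789000ull);
	printf("PDP0 UDW 0x%x LDW 0x%x\n",
	       (unsigned)reg_state[37], (unsigned)reg_state[39]);
	return 0;
}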
2459 2489
2460 if (ring->id == RCS) { 2490 if (engine->id == RCS) {
2461 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 2491 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2462 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 2492 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2463 make_rpcs(dev)); 2493 make_rpcs(dev));
@@ -2513,15 +2543,15 @@ void intel_lr_context_free(struct intel_context *ctx)
2513 * in LRC mode, but does not include the "shared data page" used with 2543 * in LRC mode, but does not include the "shared data page" used with
2514 * GuC submission. The caller should account for this if using the GuC. 2544 * GuC submission. The caller should account for this if using the GuC.
2515 */ 2545 */
2516uint32_t intel_lr_context_size(struct intel_engine_cs *ring) 2546uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2517{ 2547{
2518 int ret = 0; 2548 int ret = 0;
2519 2549
2520 WARN_ON(INTEL_INFO(ring->dev)->gen < 8); 2550 WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
2521 2551
2522 switch (ring->id) { 2552 switch (engine->id) {
2523 case RCS: 2553 case RCS:
2524 if (INTEL_INFO(ring->dev)->gen >= 9) 2554 if (INTEL_INFO(engine->dev)->gen >= 9)
2525 ret = GEN9_LR_CONTEXT_RENDER_SIZE; 2555 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2526 else 2556 else
2527 ret = GEN8_LR_CONTEXT_RENDER_SIZE; 2557 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2537,22 +2567,22 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
2537 return ret; 2567 return ret;
2538} 2568}
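intel_lr_context_size() reports only the per-engine context image size; as the comment says, the GuC shared data page is not included, and the caller (intel_lr_context_deferred_alloc() below) rounds the size up to a page and adds PAGE_SIZE * LRC_PPHWSP_PN on top. A stand-alone sketch of that arithmetic; GEN8_LR_CONTEXT_RENDER_SIZE and LRC_PPHWSP_PN here are placeholder values, not the driver's.

#include <stdio.h>

#define PAGE_SIZE	4096u
#define LRC_PPHWSP_PN	1u				/* assumed: one leading shared page */
#define GEN8_LR_CONTEXT_RENDER_SIZE (20u * PAGE_SIZE)	/* placeholder value */

/* Round up to a whole 4 KiB page, as round_up(size, 4096) does below. */
static unsigned int round_up_page(unsigned int x)
{
	return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	unsigned int context_size = round_up_page(GEN8_LR_CONTEXT_RENDER_SIZE);

	/* One extra page shared between the driver and the GuC. */
	context_size += PAGE_SIZE * LRC_PPHWSP_PN;

	printf("total LR context allocation: %u bytes\n", context_size);
	return 0;
}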
2539 2569
2540static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, 2570static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
2541 struct drm_i915_gem_object *default_ctx_obj) 2571 struct drm_i915_gem_object *default_ctx_obj)
2542{ 2572{
2543 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2573 struct drm_i915_private *dev_priv = engine->dev->dev_private;
2544 struct page *page; 2574 struct page *page;
2545 2575
2546 /* The HWSP is part of the default context object in LRC mode. */ 2576 /* The HWSP is part of the default context object in LRC mode. */
2547 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj) 2577 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
2548 + LRC_PPHWSP_PN * PAGE_SIZE; 2578 + LRC_PPHWSP_PN * PAGE_SIZE;
2549 page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN); 2579 page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
2550 ring->status_page.page_addr = kmap(page); 2580 engine->status_page.page_addr = kmap(page);
2551 ring->status_page.obj = default_ctx_obj; 2581 engine->status_page.obj = default_ctx_obj;
2552 2582
2553 I915_WRITE(RING_HWS_PGA(ring->mmio_base), 2583 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
2554 (u32)ring->status_page.gfx_addr); 2584 (u32)engine->status_page.gfx_addr);
2555 POSTING_READ(RING_HWS_PGA(ring->mmio_base)); 2585 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
2556} 2586}
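As the comment above notes, the hardware status page is carved out of the default context object: its GGTT address is the object's offset plus LRC_PPHWSP_PN pages, the CPU address is a kmap() of that same page, and RING_HWS_PGA is then pointed at the GGTT address. A userspace model of just the address arithmetic; every type and value below is invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096u
#define LRC_PPHWSP_PN	1u		/* assumed: HWSP is the page after page 0 */

struct fake_ctx_obj {
	uint64_t ggtt_offset;		/* where the object sits in the GGTT */
	uint8_t pages[4][PAGE_SIZE];	/* CPU-side backing storage */
};

int main(void)
{
	struct fake_ctx_obj obj = { .ggtt_offset = 0x100000 };
	uint64_t hws_gfx_addr;
	void *hws_cpu_addr;

	hws_gfx_addr = obj.ggtt_offset + LRC_PPHWSP_PN * PAGE_SIZE;
	hws_cpu_addr = obj.pages[LRC_PPHWSP_PN];	/* stands in for kmap() */

	memset(hws_cpu_addr, 0, PAGE_SIZE);
	printf("HWSP at GGTT 0x%llx, CPU %p (what RING_HWS_PGA would be given)\n",
	       (unsigned long long)hws_gfx_addr, hws_cpu_addr);
	return 0;
}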
2557 2587
2558/** 2588/**
@@ -2570,18 +2600,18 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
2570 */ 2600 */
2571 2601
2572int intel_lr_context_deferred_alloc(struct intel_context *ctx, 2602int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2573 struct intel_engine_cs *ring) 2603 struct intel_engine_cs *engine)
2574{ 2604{
2575 struct drm_device *dev = ring->dev; 2605 struct drm_device *dev = engine->dev;
2576 struct drm_i915_gem_object *ctx_obj; 2606 struct drm_i915_gem_object *ctx_obj;
2577 uint32_t context_size; 2607 uint32_t context_size;
2578 struct intel_ringbuffer *ringbuf; 2608 struct intel_ringbuffer *ringbuf;
2579 int ret; 2609 int ret;
2580 2610
2581 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); 2611 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
2582 WARN_ON(ctx->engine[ring->id].state); 2612 WARN_ON(ctx->engine[engine->id].state);
2583 2613
2584 context_size = round_up(intel_lr_context_size(ring), 4096); 2614 context_size = round_up(intel_lr_context_size(engine), 4096);
2585 2615
2586 	/* One extra page for sharing data between the driver and GuC */ 2616 	/* One extra page for sharing data between the driver and GuC */
2587 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2617 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2592,32 +2622,32 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2592 return -ENOMEM; 2622 return -ENOMEM;
2593 } 2623 }
2594 2624
2595 ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE); 2625 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
2596 if (IS_ERR(ringbuf)) { 2626 if (IS_ERR(ringbuf)) {
2597 ret = PTR_ERR(ringbuf); 2627 ret = PTR_ERR(ringbuf);
2598 goto error_deref_obj; 2628 goto error_deref_obj;
2599 } 2629 }
2600 2630
2601 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); 2631 ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
2602 if (ret) { 2632 if (ret) {
2603 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); 2633 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2604 goto error_ringbuf; 2634 goto error_ringbuf;
2605 } 2635 }
2606 2636
2607 ctx->engine[ring->id].ringbuf = ringbuf; 2637 ctx->engine[engine->id].ringbuf = ringbuf;
2608 ctx->engine[ring->id].state = ctx_obj; 2638 ctx->engine[engine->id].state = ctx_obj;
2609 2639
2610 if (ctx != ctx->i915->kernel_context && ring->init_context) { 2640 if (ctx != ctx->i915->kernel_context && engine->init_context) {
2611 struct drm_i915_gem_request *req; 2641 struct drm_i915_gem_request *req;
2612 2642
2613 req = i915_gem_request_alloc(ring, ctx); 2643 req = i915_gem_request_alloc(engine, ctx);
2614 if (IS_ERR(req)) { 2644 if (IS_ERR(req)) {
2615 ret = PTR_ERR(req); 2645 ret = PTR_ERR(req);
2616 DRM_ERROR("ring create req: %d\n", ret); 2646 DRM_ERROR("ring create req: %d\n", ret);
2617 goto error_ringbuf; 2647 goto error_ringbuf;
2618 } 2648 }
2619 2649
2620 ret = ring->init_context(req); 2650 ret = engine->init_context(req);
2621 if (ret) { 2651 if (ret) {
2622 DRM_ERROR("ring init context: %d\n", 2652 DRM_ERROR("ring init context: %d\n",
2623 ret); 2653 ret);
@@ -2632,8 +2662,8 @@ error_ringbuf:
2632 intel_ringbuffer_free(ringbuf); 2662 intel_ringbuffer_free(ringbuf);
2633error_deref_obj: 2663error_deref_obj:
2634 drm_gem_object_unreference(&ctx_obj->base); 2664 drm_gem_object_unreference(&ctx_obj->base);
2635 ctx->engine[ring->id].ringbuf = NULL; 2665 ctx->engine[engine->id].ringbuf = NULL;
2636 ctx->engine[ring->id].state = NULL; 2666 ctx->engine[engine->id].state = NULL;
2637 return ret; 2667 return ret;
2638} 2668}
2639 2669
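The allocation path above is publish-last: the context object and ring buffer are created and populated first, the ctx->engine[id].ringbuf/state pointers are only set once that succeeds, and the error labels unwind in reverse order and clear those pointers again. A condensed, self-contained model of that shape, using invented stand-in types rather than the driver's structures.

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for the context object and ring buffer. */
struct fake_ringbuf { char *pages; };
struct fake_engine_slot { void *state; struct fake_ringbuf *ringbuf; };

static int fake_populate(void *state, struct fake_ringbuf *rb)
{
	return (state && rb->pages) ? 0 : -22;	/* -EINVAL-ish */
}

/* Mirrors the shape of the path above: create, populate, publish,
 * and unwind with gotos in reverse order on failure. */
static int fake_deferred_alloc(struct fake_engine_slot *slot)
{
	struct fake_ringbuf *rb;
	void *state;
	int ret;

	state = calloc(1, 4096);
	if (!state)
		return -12;			/* -ENOMEM */

	rb = calloc(1, sizeof(*rb));
	if (!rb) {
		ret = -12;
		goto error_deref_obj;
	}
	rb->pages = calloc(4, 4096);		/* 4-page ring, as above */
	if (!rb->pages) {
		ret = -12;
		goto error_ringbuf;
	}

	ret = fake_populate(state, rb);
	if (ret)
		goto error_ringbuf;

	slot->ringbuf = rb;			/* publish only after success */
	slot->state = state;
	return 0;

error_ringbuf:
	free(rb->pages);
	free(rb);
error_deref_obj:
	free(state);
	slot->ringbuf = NULL;
	slot->state = NULL;
	return ret;
}

int main(void)
{
	struct fake_engine_slot slot = { 0 };

	printf("fake_deferred_alloc: %d\n", fake_deferred_alloc(&slot));
	return 0;
}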