author    Tvrtko Ursulin <tvrtko.ursulin@intel.com>  2016-03-16 07:00:37 -0400
committer Tvrtko Ursulin <tvrtko.ursulin@intel.com>  2016-03-16 11:33:10 -0400
commit    0bc40be85f33ca1795253a5f8674efb430f83cce (patch)
tree      6d0546ca8e222bb60fd0fe222f1cd1438ef319fa /drivers
parent    e2f80391478af71bbbc91686fe0efc580b907caa (diff)
drm/i915: Rename intel_engine_cs function parameters
@@
identifier func;
@@
func(..., struct intel_engine_cs *
- ring
+ engine
, ...)
{
<...
- ring
+ engine
...>
}

@@
identifier func;
type T;
@@
T func(..., struct intel_engine_cs *
- ring
+ engine
, ...);

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
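(A tree-wide parameter rename like this is normally applied mechanically rather than by hand. A minimal sketch of the invocation, assuming the semantic patch above is saved as engine-rename.cocci and Coccinelle is installed, would be:

    spatch --sp-file engine-rename.cocci --in-place --dir drivers/gpu/drm/i915

followed by a normal kernel build to catch any call sites the script could not reach. The file name here is illustrative, not part of the original commit.)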
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c       | 122
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c          |  16
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              |  18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              |  86
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c      |  22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c        |  97
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c              | 111
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c         |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c             | 566
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h             |  16
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c      | 618
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h      |  54
15 files changed, 909 insertions(+), 867 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894ed925..2c50142be559 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -555,7 +555,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 	return 0;
 }
 
-static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+static bool validate_cmds_sorted(struct intel_engine_cs *engine,
 				 const struct drm_i915_cmd_table *cmd_tables,
 				 int cmd_table_count)
 {
@@ -577,7 +577,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
 
 		if (curr < previous) {
 			DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
-				  ring->id, i, j, curr, previous);
+				  engine->id, i, j, curr, previous);
 			ret = false;
 		}
 
@@ -611,11 +611,11 @@ static bool check_sorted(int ring_id,
 	return ret;
 }
 
-static bool validate_regs_sorted(struct intel_engine_cs *ring)
+static bool validate_regs_sorted(struct intel_engine_cs *engine)
 {
-	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
-	       check_sorted(ring->id, ring->master_reg_table,
-			    ring->master_reg_count);
+	return check_sorted(engine->id, engine->reg_table, engine->reg_count) &&
+	       check_sorted(engine->id, engine->master_reg_table,
+			    engine->master_reg_count);
 }
 
 struct cmd_node {
@@ -639,13 +639,13 @@ struct cmd_node {
  */
 #define CMD_HASH_MASK STD_MI_OPCODE_MASK
 
-static int init_hash_table(struct intel_engine_cs *ring,
+static int init_hash_table(struct intel_engine_cs *engine,
 			   const struct drm_i915_cmd_table *cmd_tables,
 			   int cmd_table_count)
 {
 	int i, j;
 
-	hash_init(ring->cmd_hash);
+	hash_init(engine->cmd_hash);
 
 	for (i = 0; i < cmd_table_count; i++) {
 		const struct drm_i915_cmd_table *table = &cmd_tables[i];
@@ -660,7 +660,7 @@ static int init_hash_table(struct intel_engine_cs *ring,
 			return -ENOMEM;
 
 		desc_node->desc = desc;
-		hash_add(ring->cmd_hash, &desc_node->node,
+		hash_add(engine->cmd_hash, &desc_node->node,
 			 desc->cmd.value & CMD_HASH_MASK);
 		}
 	}
@@ -668,13 +668,13 @@ static int init_hash_table(struct intel_engine_cs *ring,
 	return 0;
 }
 
-static void fini_hash_table(struct intel_engine_cs *ring)
+static void fini_hash_table(struct intel_engine_cs *engine)
 {
 	struct hlist_node *tmp;
 	struct cmd_node *desc_node;
 	int i;
 
-	hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+	hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
 		hash_del(&desc_node->node);
 		kfree(desc_node);
 	}
@@ -690,18 +690,18 @@ static void fini_hash_table(struct intel_engine_cs *ring)
  *
  * Return: non-zero if initialization fails
  */
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 {
 	const struct drm_i915_cmd_table *cmd_tables;
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(ring->dev))
+	if (!IS_GEN7(engine->dev))
 		return 0;
 
-	switch (ring->id) {
+	switch (engine->id) {
 	case RCS:
-		if (IS_HASWELL(ring->dev)) {
+		if (IS_HASWELL(engine->dev)) {
 			cmd_tables = hsw_render_ring_cmds;
 			cmd_table_count =
 				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -710,26 +710,26 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
 		}
 
-		ring->reg_table = gen7_render_regs;
-		ring->reg_count = ARRAY_SIZE(gen7_render_regs);
+		engine->reg_table = gen7_render_regs;
+		engine->reg_count = ARRAY_SIZE(gen7_render_regs);
 
-		if (IS_HASWELL(ring->dev)) {
-			ring->master_reg_table = hsw_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		if (IS_HASWELL(engine->dev)) {
+			engine->master_reg_table = hsw_master_regs;
+			engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
 		} else {
-			ring->master_reg_table = ivb_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+			engine->master_reg_table = ivb_master_regs;
+			engine->master_reg_count = ARRAY_SIZE(ivb_master_regs);
 		}
 
-		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
 		break;
 	case VCS:
 		cmd_tables = gen7_video_cmds;
 		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
-		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
-		if (IS_HASWELL(ring->dev)) {
+		if (IS_HASWELL(engine->dev)) {
 			cmd_tables = hsw_blt_ring_cmds;
 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
 		} else {
@@ -737,44 +737,44 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
 		}
 
-		ring->reg_table = gen7_blt_regs;
-		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
+		engine->reg_table = gen7_blt_regs;
+		engine->reg_count = ARRAY_SIZE(gen7_blt_regs);
 
-		if (IS_HASWELL(ring->dev)) {
-			ring->master_reg_table = hsw_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		if (IS_HASWELL(engine->dev)) {
+			engine->master_reg_table = hsw_master_regs;
+			engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
 		} else {
-			ring->master_reg_table = ivb_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+			engine->master_reg_table = ivb_master_regs;
+			engine->master_reg_count = ARRAY_SIZE(ivb_master_regs);
 		}
 
-		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
 		break;
 	case VECS:
 		cmd_tables = hsw_vebox_cmds;
 		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
 		/* VECS can use the same length_mask function as VCS */
-		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	default:
 		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
-			  ring->id);
+			  engine->id);
 		BUG();
 	}
 
-	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
-	BUG_ON(!validate_regs_sorted(ring));
+	BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
+	BUG_ON(!validate_regs_sorted(engine));
 
-	WARN_ON(!hash_empty(ring->cmd_hash));
+	WARN_ON(!hash_empty(engine->cmd_hash));
 
-	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
 	if (ret) {
 		DRM_ERROR("CMD: cmd_parser_init failed!\n");
-		fini_hash_table(ring);
+		fini_hash_table(engine);
 		return ret;
 	}
 
-	ring->needs_cmd_parser = true;
+	engine->needs_cmd_parser = true;
 
 	return 0;
 }
@@ -786,21 +786,21 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
  * Releases any resources related to command parsing that may have been
  * initialized for the specified ring.
  */
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
 {
-	if (!ring->needs_cmd_parser)
+	if (!engine->needs_cmd_parser)
 		return;
 
-	fini_hash_table(ring);
+	fini_hash_table(engine);
 }
 
 static const struct drm_i915_cmd_descriptor*
-find_cmd_in_table(struct intel_engine_cs *ring,
+find_cmd_in_table(struct intel_engine_cs *engine,
 		  u32 cmd_header)
 {
 	struct cmd_node *desc_node;
 
-	hash_for_each_possible(ring->cmd_hash, desc_node, node,
+	hash_for_each_possible(engine->cmd_hash, desc_node, node,
 			       cmd_header & CMD_HASH_MASK) {
 		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
 		u32 masked_cmd = desc->cmd.mask & cmd_header;
@@ -822,18 +822,18 @@ find_cmd_in_table(struct intel_engine_cs *ring,
  * ring's default length encoding and returns default_desc.
  */
 static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_engine_cs *ring,
+find_cmd(struct intel_engine_cs *engine,
 	 u32 cmd_header,
 	 struct drm_i915_cmd_descriptor *default_desc)
 {
 	const struct drm_i915_cmd_descriptor *desc;
 	u32 mask;
 
-	desc = find_cmd_in_table(ring, cmd_header);
+	desc = find_cmd_in_table(engine, cmd_header);
 	if (desc)
 		return desc;
 
-	mask = ring->get_cmd_length_mask(cmd_header);
+	mask = engine->get_cmd_length_mask(cmd_header);
 	if (!mask)
 		return NULL;
 
@@ -963,18 +963,18 @@ unpin_src:
  *
  * Return: true if the ring requires software command parsing
  */
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
 {
-	if (!ring->needs_cmd_parser)
+	if (!engine->needs_cmd_parser)
 		return false;
 
-	if (!USES_PPGTT(ring->dev))
+	if (!USES_PPGTT(engine->dev))
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
 }
 
-static bool check_cmd(const struct intel_engine_cs *ring,
+static bool check_cmd(const struct intel_engine_cs *engine,
 		      const struct drm_i915_cmd_descriptor *desc,
 		      const u32 *cmd, u32 length,
 		      const bool is_master,
@@ -1004,17 +1004,17 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 		     offset += step) {
 			const u32 reg_addr = cmd[offset] & desc->reg.mask;
 			const struct drm_i915_reg_descriptor *reg =
-				find_reg(ring->reg_table, ring->reg_count,
+				find_reg(engine->reg_table, engine->reg_count,
 					 reg_addr);
 
 			if (!reg && is_master)
-				reg = find_reg(ring->master_reg_table,
-					       ring->master_reg_count,
+				reg = find_reg(engine->master_reg_table,
+					       engine->master_reg_count,
 					       reg_addr);
 
 			if (!reg) {
 				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
-						 reg_addr, *cmd, ring->id);
+						 reg_addr, *cmd, engine->id);
 				return false;
 			}
 
@@ -1087,7 +1087,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 					  *cmd,
 					  desc->bits[i].mask,
 					  desc->bits[i].expected,
-					  dword, ring->id);
+					  dword, engine->id);
 			return false;
 		}
 	}
@@ -1113,7 +1113,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_parse_cmds(struct intel_engine_cs *engine,
 		    struct drm_i915_gem_object *batch_obj,
 		    struct drm_i915_gem_object *shadow_batch_obj,
 		    u32 batch_start_offset,
@@ -1147,7 +1147,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 		if (*cmd == MI_BATCH_BUFFER_END)
 			break;
 
-		desc = find_cmd(ring, *cmd, &default_desc);
+		desc = find_cmd(engine, *cmd, &default_desc);
 		if (!desc) {
 			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
 					 *cmd);
@@ -1179,7 +1179,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 			break;
 		}
 
-		if (!check_cmd(ring, desc, cmd, length, is_master,
+		if (!check_cmd(engine, desc, cmd, length, is_master,
 			       &oacontrol_set)) {
 			ret = -EINVAL;
 			break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5037ccb18e77..164e1432d41f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -725,11 +725,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 }
 
 static void i915_ring_seqno_info(struct seq_file *m,
-				 struct intel_engine_cs *ring)
+				 struct intel_engine_cs *engine)
 {
-	if (ring->get_seqno) {
+	if (engine->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %x\n",
-			   ring->name, ring->get_seqno(ring, false));
+			   engine->name, engine->get_seqno(engine, false));
 	}
 }
 
@@ -1992,22 +1992,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 static void i915_dump_lrc_obj(struct seq_file *m,
 			      struct intel_context *ctx,
-			      struct intel_engine_cs *ring)
+			      struct intel_engine_cs *engine)
 {
 	struct page *page;
 	uint32_t *reg_state;
 	int j;
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	unsigned long ggtt_offset = 0;
 
 	if (ctx_obj == NULL) {
 		seq_printf(m, "Context on %s with no gem object\n",
-			   ring->name);
+			   engine->name);
 		return;
 	}
 
-	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-		   intel_execlists_ctx_id(ctx, ring));
+	seq_printf(m, "CONTEXT: %s %u\n", engine->name,
+		   intel_execlists_ctx_id(ctx, engine));
 
 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
 		seq_puts(m, "\tNot bound in GGTT\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 80b14f1ba302..8d87242ce601 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2964,10 +2964,10 @@ int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring);
+i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
 
@@ -3297,10 +3297,10 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(void);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
+int i915_parse_cmds(struct intel_engine_cs *engine,
 		    struct drm_i915_gem_object *batch_obj,
 		    struct drm_i915_gem_object *shadow_batch_obj,
 		    u32 batch_start_offset,
@@ -3571,11 +3571,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 	}
 }
 
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
 				      struct drm_i915_gem_request *req)
 {
-	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
-		i915_gem_request_assign(&ring->trace_irq_req, req);
+	if (engine->trace_irq_req == NULL && engine->irq_get(engine))
+		i915_gem_request_assign(&engine->trace_irq_req, req);
 }
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5a7f6032f066..1119b8f46f09 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1141,9 +1141,9 @@ static void fake_irq(unsigned long data)
 }
 
 static bool missed_irq(struct drm_i915_private *dev_priv,
-		       struct intel_engine_cs *ring)
+		       struct intel_engine_cs *engine)
 {
-	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+	return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
 static unsigned long local_clock_us(unsigned *cpu)
@@ -2689,11 +2689,11 @@ void i915_gem_request_free(struct kref *req_ref)
 }
 
 static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *ring,
+__i915_gem_request_alloc(struct intel_engine_cs *engine,
 			 struct intel_context *ctx,
 			 struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
 	struct drm_i915_gem_request *req;
 	int ret;
 
@@ -2706,13 +2706,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
 	if (req == NULL)
 		return -ENOMEM;
 
-	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
 	if (ret)
 		goto err;
 
 	kref_init(&req->ref);
 	req->i915 = dev_priv;
-	req->ring = ring;
+	req->ring = engine;
 	req->ctx = ctx;
 	i915_gem_context_reference(req->ctx);
 
@@ -2787,11 +2787,11 @@ void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 }
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring)
+i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	list_for_each_entry(request, &engine->request_list, list) {
 		if (i915_gem_request_completed(request, false))
 			continue;
 
@@ -2802,37 +2802,37 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
 }
 
 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-				       struct intel_engine_cs *ring)
+				       struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 	bool ring_hung;
 
-	request = i915_gem_find_active_request(ring);
+	request = i915_gem_find_active_request(engine);
 
 	if (request == NULL)
 		return;
 
-	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
 	i915_set_reset_status(dev_priv, request->ctx, ring_hung);
 
-	list_for_each_entry_continue(request, &ring->request_list, list)
+	list_for_each_entry_continue(request, &engine->request_list, list)
 		i915_set_reset_status(dev_priv, request->ctx, false);
 }
 
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
-					struct intel_engine_cs *ring)
+					struct intel_engine_cs *engine)
 {
 	struct intel_ringbuffer *buffer;
 
-	while (!list_empty(&ring->active_list)) {
+	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
 
-		obj = list_first_entry(&ring->active_list,
+		obj = list_first_entry(&engine->active_list,
 				       struct drm_i915_gem_object,
-				       ring_list[ring->id]);
+				       ring_list[engine->id]);
 
-		i915_gem_object_retire__read(obj, ring->id);
+		i915_gem_object_retire__read(obj, engine->id);
 	}
 
 	/*
@@ -2842,14 +2842,14 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 */
 
 	if (i915.enable_execlists) {
-		spin_lock_irq(&ring->execlist_lock);
+		spin_lock_irq(&engine->execlist_lock);
 
 		/* list_splice_tail_init checks for empty lists */
-		list_splice_tail_init(&ring->execlist_queue,
-				      &ring->execlist_retired_req_list);
+		list_splice_tail_init(&engine->execlist_queue,
+				      &engine->execlist_retired_req_list);
 
-		spin_unlock_irq(&ring->execlist_lock);
-		intel_execlists_retire_requests(ring);
+		spin_unlock_irq(&engine->execlist_lock);
+		intel_execlists_retire_requests(engine);
 	}
 
 	/*
@@ -2859,10 +2859,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * implicit references on things like e.g. ppgtt address spaces through
 	 * the request.
 	 */
-	while (!list_empty(&ring->request_list)) {
+	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;
 
-		request = list_first_entry(&ring->request_list,
+		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 
@@ -2876,7 +2876,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * upon reset is less than when we start. Do one more pass over
 	 * all the ringbuffers to reset last_retired_head.
 	 */
-	list_for_each_entry(buffer, &ring->buffers, link) {
+	list_for_each_entry(buffer, &engine->buffers, link) {
 		buffer->last_retired_head = buffer->tail;
 		intel_ring_update_space(buffer);
 	}
@@ -2910,19 +2910,19 @@ void i915_gem_reset(struct drm_device *dev)
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 {
-	WARN_ON(i915_verify_lists(ring->dev));
+	WARN_ON(i915_verify_lists(engine->dev));
 
 	/* Retire requests first as we use it above for the early return.
 	 * If we retire requests last, we may use a later seqno and so clear
 	 * the requests lists without clearing the active list, leading to
 	 * confusion.
 	 */
-	while (!list_empty(&ring->request_list)) {
+	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;
 
-		request = list_first_entry(&ring->request_list,
+		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 
@@ -2936,26 +2936,26 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
 	 * before we free the context associated with the requests.
 	 */
-	while (!list_empty(&ring->active_list)) {
+	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
 
-		obj = list_first_entry(&ring->active_list,
+		obj = list_first_entry(&engine->active_list,
 				       struct drm_i915_gem_object,
-				       ring_list[ring->id]);
+				       ring_list[engine->id]);
 
-		if (!list_empty(&obj->last_read_req[ring->id]->list))
+		if (!list_empty(&obj->last_read_req[engine->id]->list))
 			break;
 
-		i915_gem_object_retire__read(obj, ring->id);
+		i915_gem_object_retire__read(obj, engine->id);
 	}
 
-	if (unlikely(ring->trace_irq_req &&
-		     i915_gem_request_completed(ring->trace_irq_req, true))) {
-		ring->irq_put(ring);
-		i915_gem_request_assign(&ring->trace_irq_req, NULL);
+	if (unlikely(engine->trace_irq_req &&
+		     i915_gem_request_completed(engine->trace_irq_req, true))) {
+		engine->irq_put(engine);
+		i915_gem_request_assign(&engine->trace_irq_req, NULL);
 	}
 
-	WARN_ON(i915_verify_lists(ring->dev));
+	WARN_ON(i915_verify_lists(engine->dev));
 }
 
 bool
@@ -5022,10 +5022,10 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 }
 
 static void
-init_ring_lists(struct intel_engine_cs *ring)
+init_ring_lists(struct intel_engine_cs *engine)
 {
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&engine->active_list);
+	INIT_LIST_HEAD(&engine->request_list);
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index cc07666c2d91..44f582988094 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -600,7 +600,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	return ret;
 }
 
-static inline bool should_skip_switch(struct intel_engine_cs *ring,
+static inline bool should_skip_switch(struct intel_engine_cs *engine,
 				      struct intel_context *from,
 				      struct intel_context *to)
 {
@@ -608,42 +608,42 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
 		return false;
 
 	if (to->ppgtt && from == to &&
-	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+	    !(intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings))
 		return true;
 
 	return false;
 }
 
 static bool
-needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	if (!to->ppgtt)
 		return false;
 
-	if (INTEL_INFO(ring->dev)->gen < 8)
+	if (INTEL_INFO(engine->dev)->gen < 8)
 		return true;
 
-	if (ring != &dev_priv->ring[RCS])
+	if (engine != &dev_priv->ring[RCS])
 		return true;
 
 	return false;
 }
 
 static bool
-needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
 		   u32 hw_flags)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	if (!to->ppgtt)
 		return false;
 
-	if (!IS_GEN8(ring->dev))
+	if (!IS_GEN8(engine->dev))
 		return false;
 
-	if (ring != &dev_priv->ring[RCS])
+	if (engine != &dev_priv->ring[RCS])
 		return false;
 
 	if (hw_flags & MI_RESTORE_INHIBIT)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b73496ea5583..f94d756828e8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -599,7 +599,7 @@ static bool only_mappable_for_reloc(unsigned int flags)
 
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
-				struct intel_engine_cs *ring,
+				struct intel_engine_cs *engine,
 				bool *need_reloc)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
@@ -713,7 +713,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 }
 
 static int
-i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 			    struct list_head *vmas,
 			    struct intel_context *ctx,
 			    bool *need_relocs)
@@ -723,10 +723,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
 	int retry;
 
-	i915_gem_retire_requests_ring(ring);
+	i915_gem_retire_requests_ring(engine);
 
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
@@ -788,7 +788,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 		if (eb_vma_misplaced(vma))
 			ret = i915_vma_unbind(vma);
 		else
-			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+			ret = i915_gem_execbuffer_reserve_vma(vma,
+							      engine,
+							      need_relocs);
 		if (ret)
 			goto err;
 	}
@@ -798,7 +800,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 		if (drm_mm_node_allocated(&vma->node))
 			continue;
 
-		ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+		ret = i915_gem_execbuffer_reserve_vma(vma, engine,
+						      need_relocs);
 		if (ret)
 			goto err;
 	}
@@ -821,7 +824,7 @@ static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
-				  struct intel_engine_cs *ring,
+				  struct intel_engine_cs *engine,
 				  struct eb_vmas *eb,
 				  struct drm_i915_gem_exec_object2 *exec,
 				  struct intel_context *ctx)
@@ -910,7 +913,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+					  &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1062,12 +1066,12 @@ validate_exec_list(struct drm_device *dev,
 
 static struct intel_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-			  struct intel_engine_cs *ring, const u32 ctx_id)
+			  struct intel_engine_cs *engine, const u32 ctx_id)
 {
 	struct intel_context *ctx = NULL;
 	struct i915_ctx_hang_stats *hs;
 
-	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
+	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
 		return ERR_PTR(-EINVAL);
 
 	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -1080,8 +1084,8 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 		return ERR_PTR(-EIO);
 	}
 
-	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-		int ret = intel_lr_context_deferred_alloc(ctx, ring);
+	if (i915.enable_execlists && !ctx->engine[engine->id].state) {
+		int ret = intel_lr_context_deferred_alloc(ctx, engine);
 		if (ret) {
 			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
 			return ERR_PTR(ret);
@@ -1171,7 +1175,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 }
 
 static struct drm_i915_gem_object*
-i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
 			  struct eb_vmas *eb,
 			  struct drm_i915_gem_object *batch_obj,
@@ -1183,12 +1187,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 	struct i915_vma *vma;
 	int ret;
 
-	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
+	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
 						   PAGE_ALIGN(batch_len));
 	if (IS_ERR(shadow_batch_obj))
 		return shadow_batch_obj;
 
-	ret = i915_parse_cmds(ring,
+	ret = i915_parse_cmds(engine,
 			      batch_obj,
 			      shadow_batch_obj,
 			      batch_start_offset,
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index fc7e6d5c6251..b21f72ec895c 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -169,15 +169,15 @@ void i915_gem_render_state_fini(struct render_state *so)
 	drm_gem_object_unreference(&so->obj->base);
 }
 
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 				  struct render_state *so)
 {
 	int ret;
 
-	if (WARN_ON(ring->id != RCS))
+	if (WARN_ON(engine->id != RCS))
 		return -ENOENT;
 
-	ret = render_state_init(so, ring->dev);
+	ret = render_state_init(so, engine->dev);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index e641bb093a90..6aaa3a10a630 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -43,7 +43,7 @@ struct render_state {
 
 int i915_gem_render_state_init(struct drm_i915_gem_request *req);
 void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 				  struct render_state *so);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d97cadcfccb1..029ed4031edf 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -842,7 +842,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
 static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 					struct drm_i915_error_state *error,
-					struct intel_engine_cs *ring,
+					struct intel_engine_cs *engine,
 					struct drm_i915_error_ring *ering)
 {
 	struct intel_engine_cs *to;
@@ -861,63 +861,64 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 		u16 signal_offset;
 		u32 *tmp;
 
-		if (ring == to)
+		if (engine == to)
 			continue;
 
-		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+		signal_offset = (GEN8_SIGNAL_OFFSET(engine, i) & (PAGE_SIZE - 1))
 				/ 4;
 		tmp = error->semaphore_obj->pages[0];
-		idx = intel_ring_sync_index(ring, to);
+		idx = intel_ring_sync_index(engine, to);
 
 		ering->semaphore_mboxes[idx] = tmp[signal_offset];
-		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
 	}
 }
 
 static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
-					struct intel_engine_cs *ring,
+					struct intel_engine_cs *engine,
 					struct drm_i915_error_ring *ering)
 {
-	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
-	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
-	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
-	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
+	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
+	ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
+	ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
 
 	if (HAS_VEBOX(dev_priv->dev)) {
 		ering->semaphore_mboxes[2] =
-			I915_READ(RING_SYNC_2(ring->mmio_base));
-		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+			I915_READ(RING_SYNC_2(engine->mmio_base));
+		ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
 	}
 }
 
 static void i915_record_ring_state(struct drm_device *dev,
 				   struct drm_i915_error_state *error,
-				   struct intel_engine_cs *ring,
+				   struct intel_engine_cs *engine,
 				   struct drm_i915_error_ring *ering)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
-		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
 		if (INTEL_INFO(dev)->gen >= 8)
-			gen8_record_semaphore_state(dev_priv, error, ring, ering);
+			gen8_record_semaphore_state(dev_priv, error, engine,
+						    ering);
 		else
-			gen6_record_semaphore_state(dev_priv, ring, ering);
+			gen6_record_semaphore_state(dev_priv, engine, ering);
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
-		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
-		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
-		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
-		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
+		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
+		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
+		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
+		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
 		if (INTEL_INFO(dev)->gen >= 8) {
-			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
-			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
+			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
 		}
-		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
+		ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
 	} else {
 		ering->faddr = I915_READ(DMA_FADD_I8XX);
 		ering->ipeir = I915_READ(IPEIR);
@@ -925,20 +926,20 @@ static void i915_record_ring_state(struct drm_device *dev,
 		ering->instdone = I915_READ(GEN2_INSTDONE);
 	}
 
-	ering->waiting = waitqueue_active(&ring->irq_queue);
-	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
-	ering->seqno = ring->get_seqno(ring, false);
-	ering->acthd = intel_ring_get_active_head(ring);
-	ering->start = I915_READ_START(ring);
-	ering->head = I915_READ_HEAD(ring);
-	ering->tail = I915_READ_TAIL(ring);
-	ering->ctl = I915_READ_CTL(ring);
+	ering->waiting = waitqueue_active(&engine->irq_queue);
+	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+	ering->seqno = engine->get_seqno(engine, false);
+	ering->acthd = intel_ring_get_active_head(engine);
+	ering->start = I915_READ_START(engine);
+	ering->head = I915_READ_HEAD(engine);
+	ering->tail = I915_READ_TAIL(engine);
+	ering->ctl = I915_READ_CTL(engine);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		i915_reg_t mmio;
 
 		if (IS_GEN7(dev)) {
-			switch (ring->id) {
+			switch (engine->id) {
 			default:
 			case RCS:
 				mmio = RENDER_HWS_PGA_GEN7;
@@ -953,51 +954,51 @@ static void i915_record_ring_state(struct drm_device *dev,
 				mmio = VEBOX_HWS_PGA_GEN7;
 				break;
 			}
-		} else if (IS_GEN6(ring->dev)) {
-			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+		} else if (IS_GEN6(engine->dev)) {
+			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 		} else {
 			/* XXX: gen8 returns to sanity */
-			mmio = RING_HWS_PGA(ring->mmio_base);
+			mmio = RING_HWS_PGA(engine->mmio_base);
 		}
 
 		ering->hws = I915_READ(mmio);
 	}
 
-	ering->hangcheck_score = ring->hangcheck.score;
-	ering->hangcheck_action = ring->hangcheck.action;
+	ering->hangcheck_score = engine->hangcheck.score;
+	ering->hangcheck_action = engine->hangcheck.action;
 
 	if (USES_PPGTT(dev)) {
 		int i;
 
-		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
+		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
 		if (IS_GEN6(dev))
 			ering->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE_READ(ring));
+				I915_READ(RING_PP_DIR_BASE_READ(engine));
 		else if (IS_GEN7(dev))
 			ering->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE(ring));
+				I915_READ(RING_PP_DIR_BASE(engine));
 		else if (INTEL_INFO(dev)->gen >= 8)
 			for (i = 0; i < 4; i++) {
 				ering->vm_info.pdp[i] =
-					I915_READ(GEN8_RING_PDP_UDW(ring, i));
+					I915_READ(GEN8_RING_PDP_UDW(engine, i));
 				ering->vm_info.pdp[i] <<= 32;
 				ering->vm_info.pdp[i] |=
-					I915_READ(GEN8_RING_PDP_LDW(ring, i));
+					I915_READ(GEN8_RING_PDP_LDW(engine, i));
 			}
 	}
 }
 
 
-static void i915_gem_record_active_context(struct intel_engine_cs *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 					   struct drm_i915_error_state *error,
 					   struct drm_i915_error_ring *ering)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	/* Currently render ring is the only HW context user */
-	if (ring->id != RCS || !error->ccid)
+	if (engine->id != RCS || !error->ccid)
 		return;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f172de0a61bf..64658961a7e5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -994,14 +994,14 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 	return;
 }
 
-static void notify_ring(struct intel_engine_cs *ring)
+static void notify_ring(struct intel_engine_cs *engine)
 {
-	if (!intel_ring_initialized(ring))
+	if (!intel_ring_initialized(engine))
 		return;
 
-	trace_i915_gem_request_notify(ring);
+	trace_i915_gem_request_notify(engine);
 
-	wake_up_all(&ring->irq_queue);
+	wake_up_all(&engine->irq_queue);
 }
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1319,12 +1319,12 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 }
 
 static __always_inline void
-gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
+gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 {
 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
-		notify_ring(ring);
+		notify_ring(engine);
 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
-		intel_lrc_irq_handler(ring);
+		intel_lrc_irq_handler(engine);
 }
 
 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
@@ -2805,10 +2805,10 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 }
 
 static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *engine, u32 seqno)
 {
-	return (list_empty(&ring->request_list) ||
-		i915_seqno_passed(seqno, ring->last_submitted_seqno));
+	return (list_empty(&engine->request_list) ||
+		i915_seqno_passed(seqno, engine->last_submitted_seqno));
 }
 
 static bool
@@ -2824,42 +2824,43 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
 }
 
 static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+				 u64 offset)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_engine_cs *signaller;
 	int i;
 
 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
 		for_each_ring(signaller, dev_priv, i) {
-			if (ring == signaller)
+			if (engine == signaller)
 				continue;
 
-			if (offset == signaller->semaphore.signal_ggtt[ring->id])
+			if (offset == signaller->semaphore.signal_ggtt[engine->id])
 				return signaller;
 		}
 	} else {
 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
 		for_each_ring(signaller, dev_priv, i) {
-			if(ring == signaller)
+			if(engine == signaller)
 				continue;
 
-			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
+			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
 				return signaller;
 		}
 	}
 
 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-		  ring->id, ipehr, offset);
+		  engine->id, ipehr, offset);
 
 	return NULL;
 }
 
 static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	u32 cmd, ipehr, head;
 	u64 offset = 0;
 	int i, backwards;
@@ -2881,11 +2882,11 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2881 * Therefore, this function does not support execlist mode in its 2882 * Therefore, this function does not support execlist mode in its
2882 * current form. Just return NULL and move on. 2883 * current form. Just return NULL and move on.
2883 */ 2884 */
2884 if (ring->buffer == NULL) 2885 if (engine->buffer == NULL)
2885 return NULL; 2886 return NULL;
2886 2887
2887 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2888 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2888 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2889 if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
2889 return NULL; 2890 return NULL;
2890 2891
2891 /* 2892 /*
@@ -2896,8 +2897,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2896 * point at a batch, and semaphores are always emitted into the 2897 * point at a batch, and semaphores are always emitted into the
2897 * ringbuffer itself. 2898 * ringbuffer itself.
2898 */ 2899 */
2899 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2900 head = I915_READ_HEAD(engine) & HEAD_ADDR;
2900 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; 2901 backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
2901 2902
2902 for (i = backwards; i; --i) { 2903 for (i = backwards; i; --i) {
2903 /* 2904 /*
@@ -2905,10 +2906,10 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2905 * our ring is smaller than what the hardware (and hence 2906 * our ring is smaller than what the hardware (and hence
2906 * HEAD_ADDR) allows. Also handles wrap-around. 2907 * HEAD_ADDR) allows. Also handles wrap-around.
2907 */ 2908 */
2908 head &= ring->buffer->size - 1; 2909 head &= engine->buffer->size - 1;
2909 2910
2910 /* This here seems to blow up */ 2911 /* This here seems to blow up */
2911 cmd = ioread32(ring->buffer->virtual_start + head); 2912 cmd = ioread32(engine->buffer->virtual_start + head);
2912 if (cmd == ipehr) 2913 if (cmd == ipehr)
2913 break; 2914 break;
2914 2915
@@ -2918,24 +2919,24 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2918 if (!i) 2919 if (!i)
2919 return NULL; 2920 return NULL;
2920 2921
2921 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 2922 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
2922 if (INTEL_INFO(ring->dev)->gen >= 8) { 2923 if (INTEL_INFO(engine->dev)->gen >= 8) {
2923 offset = ioread32(ring->buffer->virtual_start + head + 12); 2924 offset = ioread32(engine->buffer->virtual_start + head + 12);
2924 offset <<= 32; 2925 offset <<= 32;
2925 offset |= ioread32(ring->buffer->virtual_start + head + 8); 2926 offset |= ioread32(engine->buffer->virtual_start + head + 8);
2926 } 2927 }
2927 return semaphore_wait_to_signaller_ring(ring, ipehr, offset); 2928 return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2928} 2929}
2929 2930
2930static int semaphore_passed(struct intel_engine_cs *ring) 2931static int semaphore_passed(struct intel_engine_cs *engine)
2931{ 2932{
2932 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2933 struct drm_i915_private *dev_priv = engine->dev->dev_private;
2933 struct intel_engine_cs *signaller; 2934 struct intel_engine_cs *signaller;
2934 u32 seqno; 2935 u32 seqno;
2935 2936
2936 ring->hangcheck.deadlock++; 2937 engine->hangcheck.deadlock++;
2937 2938
2938 signaller = semaphore_waits_for(ring, &seqno); 2939 signaller = semaphore_waits_for(engine, &seqno);
2939 if (signaller == NULL) 2940 if (signaller == NULL)
2940 return -1; 2941 return -1;
2941 2942
@@ -2963,16 +2964,16 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2963 engine->hangcheck.deadlock = 0; 2964 engine->hangcheck.deadlock = 0;
2964} 2965}
2965 2966
2966static bool subunits_stuck(struct intel_engine_cs *ring) 2967static bool subunits_stuck(struct intel_engine_cs *engine)
2967{ 2968{
2968 u32 instdone[I915_NUM_INSTDONE_REG]; 2969 u32 instdone[I915_NUM_INSTDONE_REG];
2969 bool stuck; 2970 bool stuck;
2970 int i; 2971 int i;
2971 2972
2972 if (ring->id != RCS) 2973 if (engine->id != RCS)
2973 return true; 2974 return true;
2974 2975
2975 i915_get_extra_instdone(ring->dev, instdone); 2976 i915_get_extra_instdone(engine->dev, instdone);
2976 2977
2977 /* There might be unstable subunit states even when 2978 /* There might be unstable subunit states even when
2978 * actual head is not moving. Filter out the unstable ones by 2979 * actual head is not moving. Filter out the unstable ones by
@@ -2981,44 +2982,44 @@ static bool subunits_stuck(struct intel_engine_cs *ring)
2981 */ 2982 */
2982 stuck = true; 2983 stuck = true;
2983 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) { 2984 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
2984 const u32 tmp = instdone[i] | ring->hangcheck.instdone[i]; 2985 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
2985 2986
2986 if (tmp != ring->hangcheck.instdone[i]) 2987 if (tmp != engine->hangcheck.instdone[i])
2987 stuck = false; 2988 stuck = false;
2988 2989
2989 ring->hangcheck.instdone[i] |= tmp; 2990 engine->hangcheck.instdone[i] |= tmp;
2990 } 2991 }
2991 2992
2992 return stuck; 2993 return stuck;
2993} 2994}
2994 2995
2995static enum intel_ring_hangcheck_action 2996static enum intel_ring_hangcheck_action
2996head_stuck(struct intel_engine_cs *ring, u64 acthd) 2997head_stuck(struct intel_engine_cs *engine, u64 acthd)
2997{ 2998{
2998 if (acthd != ring->hangcheck.acthd) { 2999 if (acthd != engine->hangcheck.acthd) {
2999 3000
3000 /* Clear subunit states on head movement */ 3001 /* Clear subunit states on head movement */
3001 memset(ring->hangcheck.instdone, 0, 3002 memset(engine->hangcheck.instdone, 0,
3002 sizeof(ring->hangcheck.instdone)); 3003 sizeof(engine->hangcheck.instdone));
3003 3004
3004 return HANGCHECK_ACTIVE; 3005 return HANGCHECK_ACTIVE;
3005 } 3006 }
3006 3007
3007 if (!subunits_stuck(ring)) 3008 if (!subunits_stuck(engine))
3008 return HANGCHECK_ACTIVE; 3009 return HANGCHECK_ACTIVE;
3009 3010
3010 return HANGCHECK_HUNG; 3011 return HANGCHECK_HUNG;
3011} 3012}
3012 3013
3013static enum intel_ring_hangcheck_action 3014static enum intel_ring_hangcheck_action
3014ring_stuck(struct intel_engine_cs *ring, u64 acthd) 3015ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3015{ 3016{
3016 struct drm_device *dev = ring->dev; 3017 struct drm_device *dev = engine->dev;
3017 struct drm_i915_private *dev_priv = dev->dev_private; 3018 struct drm_i915_private *dev_priv = dev->dev_private;
3018 enum intel_ring_hangcheck_action ha; 3019 enum intel_ring_hangcheck_action ha;
3019 u32 tmp; 3020 u32 tmp;
3020 3021
3021 ha = head_stuck(ring, acthd); 3022 ha = head_stuck(engine, acthd);
3022 if (ha != HANGCHECK_HUNG) 3023 if (ha != HANGCHECK_HUNG)
3023 return ha; 3024 return ha;
3024 3025
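
A note on the accumulation trick above: subunits_stuck() ORs each new INSTDONE sample into a running mask and declares the subunits stuck only when a sample contributes no new bits, filtering out units that are still toggling. A self-contained sketch of the same idea (the register count and struct are placeholders):

    #include <stdbool.h>
    #include <stdint.h>

    #define NUM_INSTDONE_REG 4        /* illustrative, not the driver's value */

    struct hangcheck { uint32_t instdone[NUM_INSTDONE_REG]; };

    /* True only if 'sample' sets no bit the accumulated state has not
     * already seen, i.e. every subunit has stopped changing. */
    static bool sample_is_stuck(struct hangcheck *hc, const uint32_t *sample)
    {
        bool stuck = true;
        int i;

        for (i = 0; i < NUM_INSTDONE_REG; i++) {
            const uint32_t tmp = sample[i] | hc->instdone[i];

            if (tmp != hc->instdone[i])
                stuck = false;        /* still seeing new activity */

            hc->instdone[i] |= tmp;
        }
        return stuck;
    }

The accumulated mask is cleared again in head_stuck() whenever ACTHD moves, so the filter only runs across samples taken while the head is stationary.
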
@@ -3030,24 +3031,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3030 * and break the hang. This should work on 3031 * and break the hang. This should work on
3031 * all but the second generation chipsets. 3032 * all but the second generation chipsets.
3032 */ 3033 */
3033 tmp = I915_READ_CTL(ring); 3034 tmp = I915_READ_CTL(engine);
3034 if (tmp & RING_WAIT) { 3035 if (tmp & RING_WAIT) {
3035 i915_handle_error(dev, false, 3036 i915_handle_error(dev, false,
3036 "Kicking stuck wait on %s", 3037 "Kicking stuck wait on %s",
3037 ring->name); 3038 engine->name);
3038 I915_WRITE_CTL(ring, tmp); 3039 I915_WRITE_CTL(engine, tmp);
3039 return HANGCHECK_KICK; 3040 return HANGCHECK_KICK;
3040 } 3041 }
3041 3042
3042 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3043 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3043 switch (semaphore_passed(ring)) { 3044 switch (semaphore_passed(engine)) {
3044 default: 3045 default:
3045 return HANGCHECK_HUNG; 3046 return HANGCHECK_HUNG;
3046 case 1: 3047 case 1:
3047 i915_handle_error(dev, false, 3048 i915_handle_error(dev, false,
3048 "Kicking stuck semaphore on %s", 3049 "Kicking stuck semaphore on %s",
3049 ring->name); 3050 engine->name);
3050 I915_WRITE_CTL(ring, tmp); 3051 I915_WRITE_CTL(engine, tmp);
3051 return HANGCHECK_KICK; 3052 return HANGCHECK_KICK;
3052 case 0: 3053 case 0:
3053 return HANGCHECK_WAIT; 3054 return HANGCHECK_WAIT;
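
Pulling the hunks above together, the hangcheck classification reduces to a small decision tree: head movement (or still-active subunits) means the engine is making progress, a command streamer parked in a wait can be kicked by rewriting CTL, and a stuck semaphore is kicked only when semaphore_passed() reports the signaller has already gone by. A compact sketch of that flow (enum and parameter names shortened for illustration):

    enum action { ACTIVE, WAIT, KICK, HUNG };

    /* sema_passed follows semaphore_passed(): 1 = signaller already
     * passed (kick), 0 = still waiting, -1 = unknown/deadlocked. */
    static enum action classify(int head_moved, int subunits_stuck,
                                int in_wait, int in_semaphore, int sema_passed)
    {
        if (head_moved || !subunits_stuck)
            return ACTIVE;
        if (in_wait)
            return KICK;                   /* kick the stuck wait */
        if (in_semaphore)
            return sema_passed > 0 ? KICK :
                   sema_passed == 0 ? WAIT : HUNG;
        return HUNG;
    }
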
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e95f2b7ed962..317b55b0b596 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11214,7 +11214,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11214 return 0; 11214 return 0;
11215} 11215}
11216 11216
11217static bool use_mmio_flip(struct intel_engine_cs *ring, 11217static bool use_mmio_flip(struct intel_engine_cs *engine,
11218 struct drm_i915_gem_object *obj) 11218 struct drm_i915_gem_object *obj)
11219{ 11219{
11220 /* 11220 /*
@@ -11225,10 +11225,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
11225 * So using MMIO flips there would disrupt this mechanism. 11225 * So using MMIO flips there would disrupt this mechanism.
11226 */ 11226 */
11227 11227
11228 if (ring == NULL) 11228 if (engine == NULL)
11229 return true; 11229 return true;
11230 11230
11231 if (INTEL_INFO(ring->dev)->gen < 5) 11231 if (INTEL_INFO(engine->dev)->gen < 5)
11232 return false; 11232 return false;
11233 11233
11234 if (i915.use_mmio_flip < 0) 11234 if (i915.use_mmio_flip < 0)
@@ -11242,7 +11242,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
11242 false)) 11242 false))
11243 return true; 11243 return true;
11244 else 11244 else
11245 return ring != i915_gem_request_get_ring(obj->last_write_req); 11245 return engine != i915_gem_request_get_ring(obj->last_write_req);
11246} 11246}
11247 11247
11248static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, 11248static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
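
The visible branches of use_mmio_flip() above amount to: no engine means the flip must go through MMIO, pre-gen5 hardware cannot MMIO-flip at all, and otherwise a CS flip is only worthwhile when it would be queued on the same engine that last wrote the object. A sketch of just those branches (the module-parameter and fence checks elided between the two hunks are left out; 'last_write_engine' stands in for the i915_gem_request_get_ring() lookup):

    #include <stdbool.h>
    #include <stddef.h>

    static bool want_mmio_flip(const void *engine, int gen,
                               const void *last_write_engine)
    {
        if (engine == NULL)
            return true;     /* nothing to queue a CS flip on */
        if (gen < 5)
            return false;    /* no MMIO flip support before gen5 */
        /* ... i915.use_mmio_flip and fence checks elided in the diff ... */
        return engine != last_write_engine;
    }
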
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 448c68e69194..25514e91479a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -228,8 +228,8 @@ enum {
228 228
229static int intel_lr_context_pin(struct intel_context *ctx, 229static int intel_lr_context_pin(struct intel_context *ctx,
230 struct intel_engine_cs *engine); 230 struct intel_engine_cs *engine);
231static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, 231static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
232 struct drm_i915_gem_object *default_ctx_obj); 232 struct drm_i915_gem_object *default_ctx_obj);
233 233
234 234
235/** 235/**
@@ -266,23 +266,23 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
266} 266}
267 267
268static void 268static void
269logical_ring_init_platform_invariants(struct intel_engine_cs *ring) 269logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
270{ 270{
271 struct drm_device *dev = ring->dev; 271 struct drm_device *dev = engine->dev;
272 272
273 if (IS_GEN8(dev) || IS_GEN9(dev)) 273 if (IS_GEN8(dev) || IS_GEN9(dev))
274 ring->idle_lite_restore_wa = ~0; 274 engine->idle_lite_restore_wa = ~0;
275 275
276 ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 276 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
277 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && 277 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
278 (ring->id == VCS || ring->id == VCS2); 278 (engine->id == VCS || engine->id == VCS2);
279 279
280 ring->ctx_desc_template = GEN8_CTX_VALID; 280 engine->ctx_desc_template = GEN8_CTX_VALID;
281 ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << 281 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
282 GEN8_CTX_ADDRESSING_MODE_SHIFT; 282 GEN8_CTX_ADDRESSING_MODE_SHIFT;
283 if (IS_GEN8(dev)) 283 if (IS_GEN8(dev))
284 ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; 284 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
285 ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE; 285 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
286 286
287 /* TODO: WaDisableLiteRestore when we start using semaphore 287 /* TODO: WaDisableLiteRestore when we start using semaphore
288 * signalling between Command Streamers */ 288 * signalling between Command Streamers */
@@ -290,8 +290,8 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
290 290
291 /* WaEnableForceRestoreInCtxtDescForVCS:skl */ 291 /* WaEnableForceRestoreInCtxtDescForVCS:skl */
292 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */ 292 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
293 if (ring->disable_lite_restore_wa) 293 if (engine->disable_lite_restore_wa)
294 ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; 294 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
295} 295}
296 296
297/** 297/**
@@ -314,24 +314,24 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
314 */ 314 */
315static void 315static void
316intel_lr_context_descriptor_update(struct intel_context *ctx, 316intel_lr_context_descriptor_update(struct intel_context *ctx,
317 struct intel_engine_cs *ring) 317 struct intel_engine_cs *engine)
318{ 318{
319 uint64_t lrca, desc; 319 uint64_t lrca, desc;
320 320
321 lrca = ctx->engine[ring->id].lrc_vma->node.start + 321 lrca = ctx->engine[engine->id].lrc_vma->node.start +
322 LRC_PPHWSP_PN * PAGE_SIZE; 322 LRC_PPHWSP_PN * PAGE_SIZE;
323 323
324 desc = ring->ctx_desc_template; /* bits 0-11 */ 324 desc = engine->ctx_desc_template; /* bits 0-11 */
325 desc |= lrca; /* bits 12-31 */ 325 desc |= lrca; /* bits 12-31 */
326 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ 326 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
327 327
328 ctx->engine[ring->id].lrc_desc = desc; 328 ctx->engine[engine->id].lrc_desc = desc;
329} 329}
330 330
331uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 331uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
332 struct intel_engine_cs *ring) 332 struct intel_engine_cs *engine)
333{ 333{
334 return ctx->engine[ring->id].lrc_desc; 334 return ctx->engine[engine->id].lrc_desc;
335} 335}
336 336
337/** 337/**
@@ -351,9 +351,9 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
351 * Return: 20-bit globally unique context ID. 351 * Return: 20-bit globally unique context ID.
352 */ 352 */
353u32 intel_execlists_ctx_id(struct intel_context *ctx, 353u32 intel_execlists_ctx_id(struct intel_context *ctx,
354 struct intel_engine_cs *ring) 354 struct intel_engine_cs *engine)
355{ 355{
356 return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT; 356 return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
357} 357}
358 358
359static void execlists_elsp_write(struct drm_i915_gem_request *rq0, 359static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
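
The bit layout spelled out in intel_lr_context_descriptor_update() above is easy to check with a worked example: the template occupies bits 0-11, the page-aligned GGTT address of the context image fills bits 12-31, and the same address shifted down by PAGE_SHIFT becomes the 20-bit context ID in bits 32-51, which intel_execlists_ctx_id() later reads back with a single shift. A standalone sketch with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define CTX_ID_SHIFT 32   /* stand-in for GEN8_CTX_ID_SHIFT */

    int main(void)
    {
        uint64_t templ = 0x1;                 /* e.g. just the valid bit */
        uint64_t lrca  = 0x345000;            /* page-aligned GGTT address */
        uint64_t desc;

        desc  = templ;                                 /* bits 0-11  */
        desc |= lrca;                                  /* bits 12-31 */
        desc |= (lrca >> PAGE_SHIFT) << CTX_ID_SHIFT;  /* bits 32-51 */

        /* prints desc=0x0000034500345001 ctx_id=0x345 */
        printf("desc=0x%016llx ctx_id=0x%llx\n",
               (unsigned long long)desc,
               (unsigned long long)(desc >> CTX_ID_SHIFT));
        return 0;
    }
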
@@ -424,21 +424,21 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
424 execlists_elsp_write(rq0, rq1); 424 execlists_elsp_write(rq0, rq1);
425} 425}
426 426
427static void execlists_context_unqueue__locked(struct intel_engine_cs *ring) 427static void execlists_context_unqueue__locked(struct intel_engine_cs *engine)
428{ 428{
429 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; 429 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
430 struct drm_i915_gem_request *cursor, *tmp; 430 struct drm_i915_gem_request *cursor, *tmp;
431 431
432 assert_spin_locked(&ring->execlist_lock); 432 assert_spin_locked(&engine->execlist_lock);
433 433
434 /* 434 /*
435 * If irqs are not active generate a warning as batches that finish 435 * If irqs are not active generate a warning as batches that finish
436 * without the irqs may get lost and a GPU Hang may occur. 436 * without the irqs may get lost and a GPU Hang may occur.
437 */ 437 */
438 WARN_ON(!intel_irqs_enabled(ring->dev->dev_private)); 438 WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
439 439
440 /* Try to read in pairs */ 440 /* Try to read in pairs */
441 list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue, 441 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
442 execlist_link) { 442 execlist_link) {
443 if (!req0) { 443 if (!req0) {
444 req0 = cursor; 444 req0 = cursor;
@@ -447,7 +447,7 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
447 * will update tail past first request's workload */ 447 * will update tail past first request's workload */
448 cursor->elsp_submitted = req0->elsp_submitted; 448 cursor->elsp_submitted = req0->elsp_submitted;
449 list_move_tail(&req0->execlist_link, 449 list_move_tail(&req0->execlist_link,
450 &ring->execlist_retired_req_list); 450 &engine->execlist_retired_req_list);
451 req0 = cursor; 451 req0 = cursor;
452 } else { 452 } else {
453 req1 = cursor; 453 req1 = cursor;
@@ -459,7 +459,7 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
459 if (unlikely(!req0)) 459 if (unlikely(!req0))
460 return; 460 return;
461 461
462 if (req0->elsp_submitted & ring->idle_lite_restore_wa) { 462 if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
463 /* 463 /*
464 * WaIdleLiteRestore: make sure we never cause a lite restore 464 * WaIdleLiteRestore: make sure we never cause a lite restore
465 * with HEAD==TAIL. 465 * with HEAD==TAIL.
@@ -470,7 +470,7 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
470 */ 470 */
471 struct intel_ringbuffer *ringbuf; 471 struct intel_ringbuffer *ringbuf;
472 472
473 ringbuf = req0->ctx->engine[ring->id].ringbuf; 473 ringbuf = req0->ctx->engine[engine->id].ringbuf;
474 req0->tail += 8; 474 req0->tail += 8;
475 req0->tail &= ringbuf->size - 1; 475 req0->tail &= ringbuf->size - 1;
476 } 476 }
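
The queue walk above implements the two-port ELSP rule: consecutive requests that share a context are coalesced into one submission (the later request's tail covers the earlier workload), and the first request from a different context takes the second port. A sketch of just the selection, with the retired-list bookkeeping omitted and an illustrative request type:

    struct req { struct req *next; int ctx_id; };

    static void pick_elsp_pair(struct req *queue,
                               struct req **req0, struct req **req1)
    {
        struct req *cursor;

        *req0 = *req1 = NULL;
        for (cursor = queue; cursor; cursor = cursor->next) {
            if (!*req0) {
                *req0 = cursor;
            } else if ((*req0)->ctx_id == cursor->ctx_id) {
                /* same context: the later request supersedes the
                 * earlier one, whose work its tail already covers */
                *req0 = cursor;
            } else {
                *req1 = cursor;     /* second distinct context */
                break;              /* both ports filled */
            }
        }
    }
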
@@ -478,34 +478,34 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *ring)
478 execlists_submit_requests(req0, req1); 478 execlists_submit_requests(req0, req1);
479} 479}
480 480
481static void execlists_context_unqueue(struct intel_engine_cs *ring) 481static void execlists_context_unqueue(struct intel_engine_cs *engine)
482{ 482{
483 struct drm_i915_private *dev_priv = ring->dev->dev_private; 483 struct drm_i915_private *dev_priv = engine->dev->dev_private;
484 484
485 spin_lock(&dev_priv->uncore.lock); 485 spin_lock(&dev_priv->uncore.lock);
486 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); 486 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
487 487
488 execlists_context_unqueue__locked(ring); 488 execlists_context_unqueue__locked(engine);
489 489
490 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); 490 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
491 spin_unlock(&dev_priv->uncore.lock); 491 spin_unlock(&dev_priv->uncore.lock);
492} 492}
493 493
494static unsigned int 494static unsigned int
495execlists_check_remove_request(struct intel_engine_cs *ring, u32 request_id) 495execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
496{ 496{
497 struct drm_i915_gem_request *head_req; 497 struct drm_i915_gem_request *head_req;
498 498
499 assert_spin_locked(&ring->execlist_lock); 499 assert_spin_locked(&engine->execlist_lock);
500 500
501 head_req = list_first_entry_or_null(&ring->execlist_queue, 501 head_req = list_first_entry_or_null(&engine->execlist_queue,
502 struct drm_i915_gem_request, 502 struct drm_i915_gem_request,
503 execlist_link); 503 execlist_link);
504 504
505 if (!head_req) 505 if (!head_req)
506 return 0; 506 return 0;
507 507
508 if (unlikely(intel_execlists_ctx_id(head_req->ctx, ring) != request_id)) 508 if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
509 return 0; 509 return 0;
510 510
511 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); 511 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
@@ -514,26 +514,26 @@ execlists_check_remove_request(struct intel_engine_cs *ring, u32 request_id)
514 return 0; 514 return 0;
515 515
516 list_move_tail(&head_req->execlist_link, 516 list_move_tail(&head_req->execlist_link,
517 &ring->execlist_retired_req_list); 517 &engine->execlist_retired_req_list);
518 518
519 return 1; 519 return 1;
520} 520}
521 521
522static u32 522static u32
523get_context_status(struct intel_engine_cs *ring, unsigned int read_pointer, 523get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
524 u32 *context_id) 524 u32 *context_id)
525{ 525{
526 struct drm_i915_private *dev_priv = ring->dev->dev_private; 526 struct drm_i915_private *dev_priv = engine->dev->dev_private;
527 u32 status; 527 u32 status;
528 528
529 read_pointer %= GEN8_CSB_ENTRIES; 529 read_pointer %= GEN8_CSB_ENTRIES;
530 530
531 status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer)); 531 status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
532 532
533 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) 533 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
534 return 0; 534 return 0;
535 535
536 *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(ring, 536 *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
537 read_pointer)); 537 read_pointer));
538 538
539 return status; 539 return status;
@@ -546,33 +546,34 @@ get_context_status(struct intel_engine_cs *ring, unsigned int read_pointer,
546 * Check the unread Context Status Buffers and manage the submission of new 546 * Check the unread Context Status Buffers and manage the submission of new
547 * contexts to the ELSP accordingly. 547 * contexts to the ELSP accordingly.
548 */ 548 */
549void intel_lrc_irq_handler(struct intel_engine_cs *ring) 549void intel_lrc_irq_handler(struct intel_engine_cs *engine)
550{ 550{
551 struct drm_i915_private *dev_priv = ring->dev->dev_private; 551 struct drm_i915_private *dev_priv = engine->dev->dev_private;
552 u32 status_pointer; 552 u32 status_pointer;
553 unsigned int read_pointer, write_pointer; 553 unsigned int read_pointer, write_pointer;
554 u32 status = 0; 554 u32 status = 0;
555 u32 status_id; 555 u32 status_id;
556 unsigned int submit_contexts = 0; 556 unsigned int submit_contexts = 0;
557 557
558 spin_lock(&ring->execlist_lock); 558 spin_lock(&engine->execlist_lock);
559 559
560 spin_lock(&dev_priv->uncore.lock); 560 spin_lock(&dev_priv->uncore.lock);
561 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); 561 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
562 562
563 status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(ring)); 563 status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
564 564
565 read_pointer = ring->next_context_status_buffer; 565 read_pointer = engine->next_context_status_buffer;
566 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); 566 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
567 if (read_pointer > write_pointer) 567 if (read_pointer > write_pointer)
568 write_pointer += GEN8_CSB_ENTRIES; 568 write_pointer += GEN8_CSB_ENTRIES;
569 569
570 while (read_pointer < write_pointer) { 570 while (read_pointer < write_pointer) {
571 status = get_context_status(ring, ++read_pointer, &status_id); 571 status = get_context_status(engine, ++read_pointer,
572 &status_id);
572 573
573 if (unlikely(status & GEN8_CTX_STATUS_PREEMPTED)) { 574 if (unlikely(status & GEN8_CTX_STATUS_PREEMPTED)) {
574 if (status & GEN8_CTX_STATUS_LITE_RESTORE) { 575 if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
575 if (execlists_check_remove_request(ring, status_id)) 576 if (execlists_check_remove_request(engine, status_id))
576 WARN(1, "Lite Restored request removed from queue\n"); 577 WARN(1, "Lite Restored request removed from queue\n");
577 } else 578 } else
578 WARN(1, "Preemption without Lite Restore\n"); 579 WARN(1, "Preemption without Lite Restore\n");
@@ -581,27 +582,28 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
581 if (status & (GEN8_CTX_STATUS_ACTIVE_IDLE | 582 if (status & (GEN8_CTX_STATUS_ACTIVE_IDLE |
582 GEN8_CTX_STATUS_ELEMENT_SWITCH)) 583 GEN8_CTX_STATUS_ELEMENT_SWITCH))
583 submit_contexts += 584 submit_contexts +=
584 execlists_check_remove_request(ring, status_id); 585 execlists_check_remove_request(engine,
586 status_id);
585 } 587 }
586 588
587 if (submit_contexts) { 589 if (submit_contexts) {
588 if (!ring->disable_lite_restore_wa || 590 if (!engine->disable_lite_restore_wa ||
589 (status & GEN8_CTX_STATUS_ACTIVE_IDLE)) 591 (status & GEN8_CTX_STATUS_ACTIVE_IDLE))
590 execlists_context_unqueue__locked(ring); 592 execlists_context_unqueue__locked(engine);
591 } 593 }
592 594
593 ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; 595 engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
594 596
595 /* Update the read pointer to the old write pointer. Manual ringbuffer 597 /* Update the read pointer to the old write pointer. Manual ringbuffer
596 * management ftw </sarcasm> */ 598 * management ftw </sarcasm> */
597 I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(ring), 599 I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
598 _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, 600 _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
599 ring->next_context_status_buffer << 8)); 601 engine->next_context_status_buffer << 8));
600 602
601 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); 603 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
602 spin_unlock(&dev_priv->uncore.lock); 604 spin_unlock(&dev_priv->uncore.lock);
603 605
604 spin_unlock(&ring->execlist_lock); 606 spin_unlock(&engine->execlist_lock);
605 607
606 if (unlikely(submit_contexts > 2)) 608 if (unlikely(submit_contexts > 2))
607 DRM_ERROR("More than two context complete events?\n"); 609 DRM_ERROR("More than two context complete events?\n");
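
The pointer arithmetic in intel_lrc_irq_handler() above is the standard trick for walking a small circular buffer: both pointers live modulo GEN8_CSB_ENTRIES, and when the hardware write pointer has wrapped past the software read pointer it is unwrapped by adding one full ring so the while loop can treat the range as linear. A minimal sketch:

    #define CSB_ENTRIES 6   /* GEN8_CSB_ENTRIES on this hardware */

    static unsigned int csb_entries_to_process(unsigned int read_pointer,
                                               unsigned int write_pointer)
    {
        if (read_pointer > write_pointer)
            write_pointer += CSB_ENTRIES;   /* unwrap */
        return write_pointer - read_pointer;
    }

For example, read_pointer == 5 and write_pointer == 1 unwraps to 7 - 5 == 2 pending events; each is then fetched at ++read_pointer, which is why get_context_status() reduces its read_pointer argument modulo the entry count.
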
@@ -1020,53 +1022,53 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
1020 return 0; 1022 return 0;
1021} 1023}
1022 1024
1023void intel_execlists_retire_requests(struct intel_engine_cs *ring) 1025void intel_execlists_retire_requests(struct intel_engine_cs *engine)
1024{ 1026{
1025 struct drm_i915_gem_request *req, *tmp; 1027 struct drm_i915_gem_request *req, *tmp;
1026 struct list_head retired_list; 1028 struct list_head retired_list;
1027 1029
1028 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); 1030 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
1029 if (list_empty(&ring->execlist_retired_req_list)) 1031 if (list_empty(&engine->execlist_retired_req_list))
1030 return; 1032 return;
1031 1033
1032 INIT_LIST_HEAD(&retired_list); 1034 INIT_LIST_HEAD(&retired_list);
1033 spin_lock_irq(&ring->execlist_lock); 1035 spin_lock_irq(&engine->execlist_lock);
1034 list_replace_init(&ring->execlist_retired_req_list, &retired_list); 1036 list_replace_init(&engine->execlist_retired_req_list, &retired_list);
1035 spin_unlock_irq(&ring->execlist_lock); 1037 spin_unlock_irq(&engine->execlist_lock);
1036 1038
1037 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { 1039 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
1038 struct intel_context *ctx = req->ctx; 1040 struct intel_context *ctx = req->ctx;
1039 struct drm_i915_gem_object *ctx_obj = 1041 struct drm_i915_gem_object *ctx_obj =
1040 ctx->engine[ring->id].state; 1042 ctx->engine[engine->id].state;
1041 1043
1042 if (ctx_obj && (ctx != req->i915->kernel_context)) 1044 if (ctx_obj && (ctx != req->i915->kernel_context))
1043 intel_lr_context_unpin(ctx, ring); 1045 intel_lr_context_unpin(ctx, engine);
1044 1046
1045 list_del(&req->execlist_link); 1047 list_del(&req->execlist_link);
1046 i915_gem_request_unreference(req); 1048 i915_gem_request_unreference(req);
1047 } 1049 }
1048} 1050}
1049 1051
1050void intel_logical_ring_stop(struct intel_engine_cs *ring) 1052void intel_logical_ring_stop(struct intel_engine_cs *engine)
1051{ 1053{
1052 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1054 struct drm_i915_private *dev_priv = engine->dev->dev_private;
1053 int ret; 1055 int ret;
1054 1056
1055 if (!intel_ring_initialized(ring)) 1057 if (!intel_ring_initialized(engine))
1056 return; 1058 return;
1057 1059
1058 ret = intel_ring_idle(ring); 1060 ret = intel_ring_idle(engine);
1059 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error)) 1061 if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
1060 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 1062 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1061 ring->name, ret); 1063 engine->name, ret);
1062 1064
1063 /* TODO: Is this correct with Execlists enabled? */ 1065 /* TODO: Is this correct with Execlists enabled? */
1064 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); 1066 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
1065 if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { 1067 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
1066 DRM_ERROR("%s: timed out trying to stop ring\n", ring->name); 1068 DRM_ERROR("%s: timed out trying to stop ring\n", engine->name);
1067 return; 1069 return;
1068 } 1070 }
1069 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); 1071 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
1070} 1072}
1071 1073
1072int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) 1074int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
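
intel_execlists_retire_requests() above uses a common lock-narrowing pattern: the whole retired list is spliced out under the execlist spinlock with list_replace_init(), and the per-request teardown (unpin, unreference) then runs with the lock dropped. A self-contained sketch of the pattern, with a plain singly-linked list and a pthread spinlock standing in for the driver's types:

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next; };

    struct engine_state {
        pthread_spinlock_t lock;
        struct node *retired;   /* stand-in for execlist_retired_req_list */
    };

    static void retire_all(struct engine_state *e, void (*put)(struct node *))
    {
        struct node *list, *n;

        pthread_spin_lock(&e->lock);
        list = e->retired;      /* splice the list out under the lock... */
        e->retired = NULL;
        pthread_spin_unlock(&e->lock);

        while ((n = list) != NULL) {   /* ...then tear it down unlocked */
            list = n->next;
            put(n);
        }
    }

Splicing first keeps the critical section short: the potentially heavyweight unpin and unreference work never runs with the spinlock held.
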
@@ -1086,17 +1088,17 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
1086} 1088}
1087 1089
1088static int intel_lr_context_do_pin(struct intel_context *ctx, 1090static int intel_lr_context_do_pin(struct intel_context *ctx,
1089 struct intel_engine_cs *ring) 1091 struct intel_engine_cs *engine)
1090{ 1092{
1091 struct drm_device *dev = ring->dev; 1093 struct drm_device *dev = engine->dev;
1092 struct drm_i915_private *dev_priv = dev->dev_private; 1094 struct drm_i915_private *dev_priv = dev->dev_private;
1093 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; 1095 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
1094 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; 1096 struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
1095 struct page *lrc_state_page; 1097 struct page *lrc_state_page;
1096 uint32_t *lrc_reg_state; 1098 uint32_t *lrc_reg_state;
1097 int ret; 1099 int ret;
1098 1100
1099 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); 1101 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
1100 1102
1101 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 1103 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
1102 PIN_OFFSET_BIAS | GUC_WOPCM_TOP); 1104 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
@@ -1109,15 +1111,15 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
1109 goto unpin_ctx_obj; 1111 goto unpin_ctx_obj;
1110 } 1112 }
1111 1113
1112 ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); 1114 ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
1113 if (ret) 1115 if (ret)
1114 goto unpin_ctx_obj; 1116 goto unpin_ctx_obj;
1115 1117
1116 ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); 1118 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
1117 intel_lr_context_descriptor_update(ctx, ring); 1119 intel_lr_context_descriptor_update(ctx, engine);
1118 lrc_reg_state = kmap(lrc_state_page); 1120 lrc_reg_state = kmap(lrc_state_page);
1119 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; 1121 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
1120 ctx->engine[ring->id].lrc_reg_state = lrc_reg_state; 1122 ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
1121 ctx_obj->dirty = true; 1123 ctx_obj->dirty = true;
1122 1124
1123 /* Invalidate GuC TLB. */ 1125 /* Invalidate GuC TLB. */
@@ -1235,7 +1237,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1235 * This WA is also required for Gen9 so extracting as a function avoids 1237 * This WA is also required for Gen9 so extracting as a function avoids
1236 * code duplication. 1238 * code duplication.
1237 */ 1239 */
1238static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, 1240static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
1239 uint32_t *const batch, 1241 uint32_t *const batch,
1240 uint32_t index) 1242 uint32_t index)
1241{ 1243{
@@ -1247,13 +1249,13 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
1247 * this batch updates GEN8_L3SQCREG4 with default value we need to 1249 * this batch updates GEN8_L3SQCREG4 with default value we need to
1248 * set this bit here to retain the WA during flush. 1250 * set this bit here to retain the WA during flush.
1249 */ 1251 */
1250 if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0)) 1252 if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
1251 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 1253 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1252 1254
1253 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 1255 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
1254 MI_SRM_LRM_GLOBAL_GTT)); 1256 MI_SRM_LRM_GLOBAL_GTT));
1255 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 1257 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1256 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); 1258 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
1257 wa_ctx_emit(batch, index, 0); 1259 wa_ctx_emit(batch, index, 0);
1258 1260
1259 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1261 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -1271,7 +1273,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
1271 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | 1273 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
1272 MI_SRM_LRM_GLOBAL_GTT)); 1274 MI_SRM_LRM_GLOBAL_GTT));
1273 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 1275 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1274 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); 1276 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
1275 wa_ctx_emit(batch, index, 0); 1277 wa_ctx_emit(batch, index, 0);
1276 1278
1277 return index; 1279 return index;
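
Throughout the workaround-batch code above, wa_ctx_emit() appends one dword to the batch image and advances the running index (the driver's macro also bounds-checks against the page size). A minimal stand-in makes the emit sequences easier to read:

    #include <stdint.h>

    /* Append one dword to the batch at *index and advance it; a
     * simplified model of the driver's wa_ctx_emit() macro, without
     * the PAGE_SIZE overflow check. */
    static inline void wa_emit(uint32_t *batch, uint32_t *index, uint32_t dw)
    {
        batch[(*index)++] = dw;
    }

With that in mind the sequence above reads naturally: store GEN8_L3SQCREG4 to the scratch page, program the workaround value, flush, then load the saved value back from scratch.
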
@@ -1324,7 +1326,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1324 * Return: non-zero if we exceed the PAGE_SIZE limit. 1326 * Return: non-zero if we exceed the PAGE_SIZE limit.
1325 */ 1327 */
1326 1328
1327static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring, 1329static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1328 struct i915_wa_ctx_bb *wa_ctx, 1330 struct i915_wa_ctx_bb *wa_ctx,
1329 uint32_t *const batch, 1331 uint32_t *const batch,
1330 uint32_t *offset) 1332 uint32_t *offset)
@@ -1336,8 +1338,8 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
1336 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1338 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1337 1339
1338 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1340 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1339 if (IS_BROADWELL(ring->dev)) { 1341 if (IS_BROADWELL(engine->dev)) {
1340 int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index); 1342 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1341 if (rc < 0) 1343 if (rc < 0)
1342 return rc; 1344 return rc;
1343 index = rc; 1345 index = rc;
@@ -1345,7 +1347,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
1345 1347
1346 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ 1348 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1347 /* Actual scratch location is at 128 bytes offset */ 1349 /* Actual scratch location is at 128 bytes offset */
1348 scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES; 1350 scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
1349 1351
1350 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); 1352 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1351 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | 1353 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1387,7 +1389,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
1387 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding 1389 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1388 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant. 1390 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
1389 */ 1391 */
1390static int gen8_init_perctx_bb(struct intel_engine_cs *ring, 1392static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
1391 struct i915_wa_ctx_bb *wa_ctx, 1393 struct i915_wa_ctx_bb *wa_ctx,
1392 uint32_t *const batch, 1394 uint32_t *const batch,
1393 uint32_t *offset) 1395 uint32_t *offset)
@@ -1402,13 +1404,13 @@ static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
1402 return wa_ctx_end(wa_ctx, *offset = index, 1); 1404 return wa_ctx_end(wa_ctx, *offset = index, 1);
1403} 1405}
1404 1406
1405static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring, 1407static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1406 struct i915_wa_ctx_bb *wa_ctx, 1408 struct i915_wa_ctx_bb *wa_ctx,
1407 uint32_t *const batch, 1409 uint32_t *const batch,
1408 uint32_t *offset) 1410 uint32_t *offset)
1409{ 1411{
1410 int ret; 1412 int ret;
1411 struct drm_device *dev = ring->dev; 1413 struct drm_device *dev = engine->dev;
1412 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1414 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1413 1415
1414 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1416 /* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1417,7 +1419,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
1417 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1419 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1418 1420
1419 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ 1421 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
1420 ret = gen8_emit_flush_coherentl3_wa(ring, batch, index); 1422 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1421 if (ret < 0) 1423 if (ret < 0)
1422 return ret; 1424 return ret;
1423 index = ret; 1425 index = ret;
@@ -1429,12 +1431,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
1429 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); 1431 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1430} 1432}
1431 1433
1432static int gen9_init_perctx_bb(struct intel_engine_cs *ring, 1434static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1433 struct i915_wa_ctx_bb *wa_ctx, 1435 struct i915_wa_ctx_bb *wa_ctx,
1434 uint32_t *const batch, 1436 uint32_t *const batch,
1435 uint32_t *offset) 1437 uint32_t *offset)
1436{ 1438{
1437 struct drm_device *dev = ring->dev; 1439 struct drm_device *dev = engine->dev;
1438 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1440 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1439 1441
1440 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 1442 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
@@ -1457,60 +1459,61 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
1457 return wa_ctx_end(wa_ctx, *offset = index, 1); 1459 return wa_ctx_end(wa_ctx, *offset = index, 1);
1458} 1460}
1459 1461
1460static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size) 1462static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1461{ 1463{
1462 int ret; 1464 int ret;
1463 1465
1464 ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size)); 1466 engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
1465 if (!ring->wa_ctx.obj) { 1467 PAGE_ALIGN(size));
1468 if (!engine->wa_ctx.obj) {
1466 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); 1469 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1467 return -ENOMEM; 1470 return -ENOMEM;
1468 } 1471 }
1469 1472
1470 ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0); 1473 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
1471 if (ret) { 1474 if (ret) {
1472 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n", 1475 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1473 ret); 1476 ret);
1474 drm_gem_object_unreference(&ring->wa_ctx.obj->base); 1477 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1475 return ret; 1478 return ret;
1476 } 1479 }
1477 1480
1478 return 0; 1481 return 0;
1479} 1482}
1480 1483
1481static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring) 1484static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
1482{ 1485{
1483 if (ring->wa_ctx.obj) { 1486 if (engine->wa_ctx.obj) {
1484 i915_gem_object_ggtt_unpin(ring->wa_ctx.obj); 1487 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
1485 drm_gem_object_unreference(&ring->wa_ctx.obj->base); 1488 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1486 ring->wa_ctx.obj = NULL; 1489 engine->wa_ctx.obj = NULL;
1487 } 1490 }
1488} 1491}
1489 1492
1490static int intel_init_workaround_bb(struct intel_engine_cs *ring) 1493static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1491{ 1494{
1492 int ret; 1495 int ret;
1493 uint32_t *batch; 1496 uint32_t *batch;
1494 uint32_t offset; 1497 uint32_t offset;
1495 struct page *page; 1498 struct page *page;
1496 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; 1499 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1497 1500
1498 WARN_ON(ring->id != RCS); 1501 WARN_ON(engine->id != RCS);
1499 1502
1500 /* update this when WA for higher Gen are added */ 1503 /* update this when WA for higher Gen are added */
1501 if (INTEL_INFO(ring->dev)->gen > 9) { 1504 if (INTEL_INFO(engine->dev)->gen > 9) {
1502 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", 1505 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1503 INTEL_INFO(ring->dev)->gen); 1506 INTEL_INFO(engine->dev)->gen);
1504 return 0; 1507 return 0;
1505 } 1508 }
1506 1509
1507 /* some WA perform writes to scratch page, ensure it is valid */ 1510 /* some WA perform writes to scratch page, ensure it is valid */
1508 if (ring->scratch.obj == NULL) { 1511 if (engine->scratch.obj == NULL) {
1509 DRM_ERROR("scratch page not allocated for %s\n", ring->name); 1512 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
1510 return -EINVAL; 1513 return -EINVAL;
1511 } 1514 }
1512 1515
1513 ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE); 1516 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
1514 if (ret) { 1517 if (ret) {
1515 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); 1518 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1516 return ret; 1519 return ret;
@@ -1520,29 +1523,29 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
1520 batch = kmap_atomic(page); 1523 batch = kmap_atomic(page);
1521 offset = 0; 1524 offset = 0;
1522 1525
1523 if (INTEL_INFO(ring->dev)->gen == 8) { 1526 if (INTEL_INFO(engine->dev)->gen == 8) {
1524 ret = gen8_init_indirectctx_bb(ring, 1527 ret = gen8_init_indirectctx_bb(engine,
1525 &wa_ctx->indirect_ctx, 1528 &wa_ctx->indirect_ctx,
1526 batch, 1529 batch,
1527 &offset); 1530 &offset);
1528 if (ret) 1531 if (ret)
1529 goto out; 1532 goto out;
1530 1533
1531 ret = gen8_init_perctx_bb(ring, 1534 ret = gen8_init_perctx_bb(engine,
1532 &wa_ctx->per_ctx, 1535 &wa_ctx->per_ctx,
1533 batch, 1536 batch,
1534 &offset); 1537 &offset);
1535 if (ret) 1538 if (ret)
1536 goto out; 1539 goto out;
1537 } else if (INTEL_INFO(ring->dev)->gen == 9) { 1540 } else if (INTEL_INFO(engine->dev)->gen == 9) {
1538 ret = gen9_init_indirectctx_bb(ring, 1541 ret = gen9_init_indirectctx_bb(engine,
1539 &wa_ctx->indirect_ctx, 1542 &wa_ctx->indirect_ctx,
1540 batch, 1543 batch,
1541 &offset); 1544 &offset);
1542 if (ret) 1545 if (ret)
1543 goto out; 1546 goto out;
1544 1547
1545 ret = gen9_init_perctx_bb(ring, 1548 ret = gen9_init_perctx_bb(engine,
1546 &wa_ctx->per_ctx, 1549 &wa_ctx->per_ctx,
1547 batch, 1550 batch,
1548 &offset); 1551 &offset);
@@ -1553,27 +1556,28 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
1553out: 1556out:
1554 kunmap_atomic(batch); 1557 kunmap_atomic(batch);
1555 if (ret) 1558 if (ret)
1556 lrc_destroy_wa_ctx_obj(ring); 1559 lrc_destroy_wa_ctx_obj(engine);
1557 1560
1558 return ret; 1561 return ret;
1559} 1562}
1560 1563
1561static int gen8_init_common_ring(struct intel_engine_cs *ring) 1564static int gen8_init_common_ring(struct intel_engine_cs *engine)
1562{ 1565{
1563 struct drm_device *dev = ring->dev; 1566 struct drm_device *dev = engine->dev;
1564 struct drm_i915_private *dev_priv = dev->dev_private; 1567 struct drm_i915_private *dev_priv = dev->dev_private;
1565 unsigned int next_context_status_buffer_hw; 1568 unsigned int next_context_status_buffer_hw;
1566 1569
1567 lrc_setup_hardware_status_page(ring, 1570 lrc_setup_hardware_status_page(engine,
1568 dev_priv->kernel_context->engine[ring->id].state); 1571 dev_priv->kernel_context->engine[engine->id].state);
1569 1572
1570 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); 1573 I915_WRITE_IMR(engine,
1571 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); 1574 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1575 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1572 1576
1573 I915_WRITE(RING_MODE_GEN7(ring), 1577 I915_WRITE(RING_MODE_GEN7(engine),
1574 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | 1578 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1575 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 1579 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1576 POSTING_READ(RING_MODE_GEN7(ring)); 1580 POSTING_READ(RING_MODE_GEN7(engine));
1577 1581
1578 /* 1582 /*
1579 * Instead of resetting the Context Status Buffer (CSB) read pointer to 1583 * Instead of resetting the Context Status Buffer (CSB) read pointer to
@@ -1588,7 +1592,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
1588 * BXT | ? | ? | 1592 * BXT | ? | ? |
1589 */ 1593 */
1590 next_context_status_buffer_hw = 1594 next_context_status_buffer_hw =
1591 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring))); 1595 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
1592 1596
1593 /* 1597 /*
1594 * When the CSB registers are reset (also after power-up / gpu reset), 1598 * When the CSB registers are reset (also after power-up / gpu reset),
@@ -1598,21 +1602,21 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
1598 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) 1602 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1599 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); 1603 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1600 1604
1601 ring->next_context_status_buffer = next_context_status_buffer_hw; 1605 engine->next_context_status_buffer = next_context_status_buffer_hw;
1602 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); 1606 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1603 1607
1604 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 1608 memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
1605 1609
1606 return 0; 1610 return 0;
1607} 1611}
1608 1612
1609static int gen8_init_render_ring(struct intel_engine_cs *ring) 1613static int gen8_init_render_ring(struct intel_engine_cs *engine)
1610{ 1614{
1611 struct drm_device *dev = ring->dev; 1615 struct drm_device *dev = engine->dev;
1612 struct drm_i915_private *dev_priv = dev->dev_private; 1616 struct drm_i915_private *dev_priv = dev->dev_private;
1613 int ret; 1617 int ret;
1614 1618
1615 ret = gen8_init_common_ring(ring); 1619 ret = gen8_init_common_ring(engine);
1616 if (ret) 1620 if (ret)
1617 return ret; 1621 return ret;
1618 1622
@@ -1626,18 +1630,18 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
1626 1630
1627 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1631 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1628 1632
1629 return init_workarounds_ring(ring); 1633 return init_workarounds_ring(engine);
1630} 1634}
1631 1635
1632static int gen9_init_render_ring(struct intel_engine_cs *ring) 1636static int gen9_init_render_ring(struct intel_engine_cs *engine)
1633{ 1637{
1634 int ret; 1638 int ret;
1635 1639
1636 ret = gen8_init_common_ring(ring); 1640 ret = gen8_init_common_ring(engine);
1637 if (ret) 1641 if (ret)
1638 return ret; 1642 return ret;
1639 1643
1640 return init_workarounds_ring(ring); 1644 return init_workarounds_ring(engine);
1641} 1645}
1642 1646
1643static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1647static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
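
One subtlety in gen8_init_common_ring() above: after reset the CSB write pointer register reads back as the full pointer mask (all ones), which is not a valid index into the 6-entry buffer, so it is clamped to GEN8_CSB_ENTRIES - 1 so that the first real event is fetched from entry 0. A sketch of that sanitising step (the 0x07 mask matches a 6-entry buffer and is illustrative):

    #include <stdint.h>

    #define CSB_ENTRIES  6
    #define CSB_PTR_MASK 0x07

    static unsigned int sanitize_csb_write_ptr(uint32_t reg_value)
    {
        unsigned int ptr = reg_value & CSB_PTR_MASK; /* GEN8_CSB_WRITE_PTR() */

        if (ptr == CSB_PTR_MASK)        /* post-reset: invalid, clamp */
            ptr = CSB_ENTRIES - 1;
        return ptr;
    }
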
@@ -1712,9 +1716,9 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1712 return 0; 1716 return 0;
1713} 1717}
1714 1718
1715static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring) 1719static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1716{ 1720{
1717 struct drm_device *dev = ring->dev; 1721 struct drm_device *dev = engine->dev;
1718 struct drm_i915_private *dev_priv = dev->dev_private; 1722 struct drm_i915_private *dev_priv = dev->dev_private;
1719 unsigned long flags; 1723 unsigned long flags;
1720 1724
@@ -1722,25 +1726,26 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
1722 return false; 1726 return false;
1723 1727
1724 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1728 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1725 if (ring->irq_refcount++ == 0) { 1729 if (engine->irq_refcount++ == 0) {
1726 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); 1730 I915_WRITE_IMR(engine,
1727 POSTING_READ(RING_IMR(ring->mmio_base)); 1731 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1732 POSTING_READ(RING_IMR(engine->mmio_base));
1728 } 1733 }
1729 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1734 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1730 1735
1731 return true; 1736 return true;
1732} 1737}
1733 1738
1734static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring) 1739static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
1735{ 1740{
1736 struct drm_device *dev = ring->dev; 1741 struct drm_device *dev = engine->dev;
1737 struct drm_i915_private *dev_priv = dev->dev_private; 1742 struct drm_i915_private *dev_priv = dev->dev_private;
1738 unsigned long flags; 1743 unsigned long flags;
1739 1744
1740 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1745 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1741 if (--ring->irq_refcount == 0) { 1746 if (--engine->irq_refcount == 0) {
1742 I915_WRITE_IMR(ring, ~ring->irq_keep_mask); 1747 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1743 POSTING_READ(RING_IMR(ring->mmio_base)); 1748 POSTING_READ(RING_IMR(engine->mmio_base));
1744 } 1749 }
1745 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1750 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1746} 1751}
@@ -1848,17 +1853,18 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1848 return 0; 1853 return 0;
1849} 1854}
1850 1855
1851static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 1856static u32 gen8_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
1852{ 1857{
1853 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 1858 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1854} 1859}
1855 1860
1856static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno) 1861static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1857{ 1862{
1858 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); 1863 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1859} 1864}
1860 1865
1861static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 1866static u32 bxt_a_get_seqno(struct intel_engine_cs *engine,
1867 bool lazy_coherency)
1862{ 1868{
1863 1869
1864 /* 1870 /*
@@ -1873,17 +1879,17 @@ static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1873 */ 1879 */
1874 1880
1875 if (!lazy_coherency) 1881 if (!lazy_coherency)
1876 intel_flush_status_page(ring, I915_GEM_HWS_INDEX); 1882 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1877 1883
1878 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 1884 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1879} 1885}
1880 1886
1881static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno) 1887static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1882{ 1888{
1883 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); 1889 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1884 1890
1885 /* See bxt_a_get_seqno() explaining the reason for the clflush. */ 1891 /* See bxt_a_get_seqno() explaining the reason for the clflush. */
1886 intel_flush_status_page(ring, I915_GEM_HWS_INDEX); 1892 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1887} 1893}
1888 1894
1889/* 1895/*
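
The BXT-A pair above works around a coherency issue: the hardware status page can be served from a stale CPU cacheline, so a non-lazy read flushes the line before loading the seqno, and the setter flushes after writing for the same reason. A sketch of the read side, with flush_line() standing in for intel_flush_status_page():

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t read_seqno(const volatile uint32_t *status_page, int index,
                               bool lazy_coherency, void (*flush_line)(int))
    {
        if (!lazy_coherency)
            flush_line(index);     /* clflush the status page line */
        return status_page[index];
    }
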
@@ -2002,109 +2008,109 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
2002 * @ring: Engine Command Streamer. 2008 * @engine: Engine Command Streamer.
2003 * 2009 *
2004 */ 2010 */
2005void intel_logical_ring_cleanup(struct intel_engine_cs *ring) 2011void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
2006{ 2012{
2007 struct drm_i915_private *dev_priv; 2013 struct drm_i915_private *dev_priv;
2008 2014
2009 if (!intel_ring_initialized(ring)) 2015 if (!intel_ring_initialized(engine))
2010 return; 2016 return;
2011 2017
2012 dev_priv = ring->dev->dev_private; 2018 dev_priv = engine->dev->dev_private;
2013 2019
2014 if (ring->buffer) { 2020 if (engine->buffer) {
2015 intel_logical_ring_stop(ring); 2021 intel_logical_ring_stop(engine);
2016 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); 2022 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
2017 } 2023 }
2018 2024
2019 if (ring->cleanup) 2025 if (engine->cleanup)
2020 ring->cleanup(ring); 2026 engine->cleanup(engine);
2021 2027
2022 i915_cmd_parser_fini_ring(ring); 2028 i915_cmd_parser_fini_ring(engine);
2023 i915_gem_batch_pool_fini(&ring->batch_pool); 2029 i915_gem_batch_pool_fini(&engine->batch_pool);
2024 2030
2025 if (ring->status_page.obj) { 2031 if (engine->status_page.obj) {
2026 kunmap(sg_page(ring->status_page.obj->pages->sgl)); 2032 kunmap(sg_page(engine->status_page.obj->pages->sgl));
2027 ring->status_page.obj = NULL; 2033 engine->status_page.obj = NULL;
2028 } 2034 }
2029 2035
2030 ring->idle_lite_restore_wa = 0; 2036 engine->idle_lite_restore_wa = 0;
2031 ring->disable_lite_restore_wa = false; 2037 engine->disable_lite_restore_wa = false;
2032 ring->ctx_desc_template = 0; 2038 engine->ctx_desc_template = 0;
2033 2039
2034 lrc_destroy_wa_ctx_obj(ring); 2040 lrc_destroy_wa_ctx_obj(engine);
2035 ring->dev = NULL; 2041 engine->dev = NULL;
2036} 2042}
2037 2043
2038static void 2044static void
2039logical_ring_default_vfuncs(struct drm_device *dev, 2045logical_ring_default_vfuncs(struct drm_device *dev,
2040 struct intel_engine_cs *ring) 2046 struct intel_engine_cs *engine)
2041{ 2047{
2042 /* Default vfuncs which can be overridden by each engine. */ 2048 /* Default vfuncs which can be overridden by each engine. */
2043 ring->init_hw = gen8_init_common_ring; 2049 engine->init_hw = gen8_init_common_ring;
2044 ring->emit_request = gen8_emit_request; 2050 engine->emit_request = gen8_emit_request;
2045 ring->emit_flush = gen8_emit_flush; 2051 engine->emit_flush = gen8_emit_flush;
2046 ring->irq_get = gen8_logical_ring_get_irq; 2052 engine->irq_get = gen8_logical_ring_get_irq;
2047 ring->irq_put = gen8_logical_ring_put_irq; 2053 engine->irq_put = gen8_logical_ring_put_irq;
2048 ring->emit_bb_start = gen8_emit_bb_start; 2054 engine->emit_bb_start = gen8_emit_bb_start;
2049 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 2055 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
2050 ring->get_seqno = bxt_a_get_seqno; 2056 engine->get_seqno = bxt_a_get_seqno;
2051 ring->set_seqno = bxt_a_set_seqno; 2057 engine->set_seqno = bxt_a_set_seqno;
2052 } else { 2058 } else {
2053 ring->get_seqno = gen8_get_seqno; 2059 engine->get_seqno = gen8_get_seqno;
2054 ring->set_seqno = gen8_set_seqno; 2060 engine->set_seqno = gen8_set_seqno;
2055 } 2061 }
2056} 2062}
2057 2063
2058static inline void 2064static inline void
2059logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift) 2065logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
2060{ 2066{
2061 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; 2067 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
2062 ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 2068 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
2063} 2069}
2064 2070
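logical_ring_default_irqs() derives both per-engine masks from fixed base bits shifted by an engine-specific amount. A small standalone sketch of that arithmetic, with invented bit values:

/* Sketch: per-engine interrupt masks as one base bit plus a shift. */
#include <stdio.h>

#define USER_INTERRUPT        (1u << 0)
#define CTX_SWITCH_INTERRUPT  (1u << 2)

int main(void)
{
    for (unsigned int shift = 0; shift <= 24; shift += 8)
        printf("shift=%2u enable=0x%08x keep=0x%08x\n", shift,
               USER_INTERRUPT << shift, CTX_SWITCH_INTERRUPT << shift);
    return 0;
}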
2065static int 2071static int
2066logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) 2072logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
2067{ 2073{
2068 struct intel_context *dctx = to_i915(dev)->kernel_context; 2074 struct intel_context *dctx = to_i915(dev)->kernel_context;
2069 int ret; 2075 int ret;
2070 2076
2071 /* Intentionally left blank. */ 2077 /* Intentionally left blank. */
2072 ring->buffer = NULL; 2078 engine->buffer = NULL;
2073 2079
2074 ring->dev = dev; 2080 engine->dev = dev;
2075 INIT_LIST_HEAD(&ring->active_list); 2081 INIT_LIST_HEAD(&engine->active_list);
2076 INIT_LIST_HEAD(&ring->request_list); 2082 INIT_LIST_HEAD(&engine->request_list);
2077 i915_gem_batch_pool_init(dev, &ring->batch_pool); 2083 i915_gem_batch_pool_init(dev, &engine->batch_pool);
2078 init_waitqueue_head(&ring->irq_queue); 2084 init_waitqueue_head(&engine->irq_queue);
2079 2085
2080 INIT_LIST_HEAD(&ring->buffers); 2086 INIT_LIST_HEAD(&engine->buffers);
2081 INIT_LIST_HEAD(&ring->execlist_queue); 2087 INIT_LIST_HEAD(&engine->execlist_queue);
2082 INIT_LIST_HEAD(&ring->execlist_retired_req_list); 2088 INIT_LIST_HEAD(&engine->execlist_retired_req_list);
2083 spin_lock_init(&ring->execlist_lock); 2089 spin_lock_init(&engine->execlist_lock);
2084 2090
2085 logical_ring_init_platform_invariants(ring); 2091 logical_ring_init_platform_invariants(engine);
2086 2092
2087 ret = i915_cmd_parser_init_ring(ring); 2093 ret = i915_cmd_parser_init_ring(engine);
2088 if (ret) 2094 if (ret)
2089 goto error; 2095 goto error;
2090 2096
2091 ret = intel_lr_context_deferred_alloc(dctx, ring); 2097 ret = intel_lr_context_deferred_alloc(dctx, engine);
2092 if (ret) 2098 if (ret)
2093 goto error; 2099 goto error;
2094 2100
2095 /* As this is the default context, always pin it */ 2101 /* As this is the default context, always pin it */
2096 ret = intel_lr_context_do_pin(dctx, ring); 2102 ret = intel_lr_context_do_pin(dctx, engine);
2097 if (ret) { 2103 if (ret) {
2098 DRM_ERROR( 2104 DRM_ERROR(
2099 "Failed to pin and map ringbuffer %s: %d\n", 2105 "Failed to pin and map ringbuffer %s: %d\n",
2100 ring->name, ret); 2106 engine->name, ret);
2101 goto error; 2107 goto error;
2102 } 2108 }
2103 2109
2104 return 0; 2110 return 0;
2105 2111
2106error: 2112error:
2107 intel_logical_ring_cleanup(ring); 2113 intel_logical_ring_cleanup(engine);
2108 return ret; 2114 return ret;
2109} 2115}
2110 2116
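logical_ring_init() funnels every failure to a single error label that calls the full cleanup routine, so intel_logical_ring_cleanup() has to tolerate partially initialised state (note its NULL checks in the hunk further up). A hedged sketch of the idiom, with invented resources standing in for the ring buffer and status page:

/* Sketch of the single-error-label pattern: every failure funnels to
 * one label, and the cleanup routine checks each resource so it is
 * safe on partial initialisation. Names are invented. */
#include <stdio.h>
#include <stdlib.h>

struct ring {
    void *buffer;
    void *status_page;
};

static void ring_cleanup(struct ring *r)
{
    free(r->status_page);  /* free(NULL) is a no-op, so partial init is fine */
    free(r->buffer);
    r->status_page = r->buffer = NULL;
}

static int ring_init(struct ring *r)
{
    int ret = 0;

    r->buffer = malloc(4096);
    if (!r->buffer) { ret = -1; goto error; }

    r->status_page = malloc(4096);
    if (!r->status_page) { ret = -1; goto error; }

    return 0;

error:
    ring_cleanup(r);  /* mirrors intel_logical_ring_cleanup() on the error path */
    return ret;
}

int main(void)
{
    struct ring r = {0};
    printf("init: %d\n", ring_init(&r));
    ring_cleanup(&r);
    return 0;
}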
@@ -2329,13 +2335,13 @@ make_rpcs(struct drm_device *dev)
2329 return rpcs; 2335 return rpcs;
2330} 2336}
2331 2337
2332static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring) 2338static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2333{ 2339{
2334 u32 indirect_ctx_offset; 2340 u32 indirect_ctx_offset;
2335 2341
2336 switch (INTEL_INFO(ring->dev)->gen) { 2342 switch (INTEL_INFO(engine->dev)->gen) {
2337 default: 2343 default:
2338 MISSING_CASE(INTEL_INFO(ring->dev)->gen); 2344 MISSING_CASE(INTEL_INFO(engine->dev)->gen);
2339 /* fall through */ 2345 /* fall through */
2340 case 9: 2346 case 9:
2341 indirect_ctx_offset = 2347 indirect_ctx_offset =
@@ -2352,9 +2358,10 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
2352 2358
2353static int 2359static int
2354populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj, 2360populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
2355 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) 2361 struct intel_engine_cs *engine,
2362 struct intel_ringbuffer *ringbuf)
2356{ 2363{
2357 struct drm_device *dev = ring->dev; 2364 struct drm_device *dev = engine->dev;
2358 struct drm_i915_private *dev_priv = dev->dev_private; 2365 struct drm_i915_private *dev_priv = dev->dev_private;
2359 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2366 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2360 struct page *page; 2367 struct page *page;
@@ -2389,33 +2396,47 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2389 * recreate this batchbuffer with new values (including all the missing 2396 * recreate this batchbuffer with new values (including all the missing
2390 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */ 2397 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
2391 reg_state[CTX_LRI_HEADER_0] = 2398 reg_state[CTX_LRI_HEADER_0] =
2392 MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED; 2399 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2393 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring), 2400 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
2401 RING_CONTEXT_CONTROL(engine),
2394 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | 2402 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2395 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2403 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2396 (HAS_RESOURCE_STREAMER(dev) ? 2404 (HAS_RESOURCE_STREAMER(dev) ?
2397 CTX_CTRL_RS_CTX_ENABLE : 0))); 2405 CTX_CTRL_RS_CTX_ENABLE : 0)));
2398 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0); 2406 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2399 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0); 2407 0);
2408 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
2409 0);
2400 /* Ring buffer start address is not known until the buffer is pinned. 2410 /* Ring buffer start address is not known until the buffer is pinned.
2401 * It is written to the context image in execlists_update_context() 2411 * It is written to the context image in execlists_update_context()
2402 */ 2412 */
2403 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0); 2413 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
2404 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base), 2414 RING_START(engine->mmio_base), 0);
2415 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
2416 RING_CTL(engine->mmio_base),
2405 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); 2417 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
2406 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0); 2418 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
2407 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0); 2419 RING_BBADDR_UDW(engine->mmio_base), 0);
2408 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base), 2420 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
2421 RING_BBADDR(engine->mmio_base), 0);
2422 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
2423 RING_BBSTATE(engine->mmio_base),
2409 RING_BB_PPGTT); 2424 RING_BB_PPGTT);
2410 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0); 2425 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
2411 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0); 2426 RING_SBBADDR_UDW(engine->mmio_base), 0);
2412 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0); 2427 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
2413 if (ring->id == RCS) { 2428 RING_SBBADDR(engine->mmio_base), 0);
2414 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0); 2429 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
2415 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0); 2430 RING_SBBSTATE(engine->mmio_base), 0);
2416 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0); 2431 if (engine->id == RCS) {
2417 if (ring->wa_ctx.obj) { 2432 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
2418 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; 2433 RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
2434 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
2435 RING_INDIRECT_CTX(engine->mmio_base), 0);
2436 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2437 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
2438 if (engine->wa_ctx.obj) {
2439 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2419 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj); 2440 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2420 2441
2421 reg_state[CTX_RCS_INDIRECT_CTX+1] = 2442 reg_state[CTX_RCS_INDIRECT_CTX+1] =
@@ -2423,7 +2444,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2423 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS); 2444 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2424 2445
2425 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 2446 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
2426 intel_lr_indirect_ctx_offset(ring) << 6; 2447 intel_lr_indirect_ctx_offset(engine) << 6;
2427 2448
2428 reg_state[CTX_BB_PER_CTX_PTR+1] = 2449 reg_state[CTX_BB_PER_CTX_PTR+1] =
2429 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) | 2450 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
@@ -2431,16 +2452,25 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2431 } 2452 }
2432 } 2453 }
2433 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED; 2454 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2434 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0); 2455 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2456 RING_CTX_TIMESTAMP(engine->mmio_base), 0);
2435 /* PDP values will be assigned later if needed */ 2457 /* PDP values will be assigned later if needed */
2436 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0); 2458 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2437 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0); 2459 0);
2438 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0); 2460 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2439 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0); 2461 0);
2440 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0); 2462 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2441 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0); 2463 0);
2442 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0); 2464 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2443 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0); 2465 0);
2466 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2467 0);
2468 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2469 0);
2470 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2471 0);
2472 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2473 0);
2444 2474
2445 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { 2475 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2446 /* 64b PPGTT (48bit canonical) 2476 /* 64b PPGTT (48bit canonical)
@@ -2457,7 +2487,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2457 execlists_update_context_pdps(ppgtt, reg_state); 2487 execlists_update_context_pdps(ppgtt, reg_state);
2458 } 2488 }
2459 2489
2460 if (ring->id == RCS) { 2490 if (engine->id == RCS) {
2461 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 2491 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2462 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 2492 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2463 make_rpcs(dev)); 2493 make_rpcs(dev));
@@ -2513,15 +2543,15 @@ void intel_lr_context_free(struct intel_context *ctx)
2513 * in LRC mode, but does not include the "shared data page" used with 2543 * in LRC mode, but does not include the "shared data page" used with
2514 * GuC submission. The caller should account for this if using the GuC. 2544 * GuC submission. The caller should account for this if using the GuC.
2515 */ 2545 */
2516uint32_t intel_lr_context_size(struct intel_engine_cs *ring) 2546uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2517{ 2547{
2518 int ret = 0; 2548 int ret = 0;
2519 2549
2520 WARN_ON(INTEL_INFO(ring->dev)->gen < 8); 2550 WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
2521 2551
2522 switch (ring->id) { 2552 switch (engine->id) {
2523 case RCS: 2553 case RCS:
2524 if (INTEL_INFO(ring->dev)->gen >= 9) 2554 if (INTEL_INFO(engine->dev)->gen >= 9)
2525 ret = GEN9_LR_CONTEXT_RENDER_SIZE; 2555 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2526 else 2556 else
2527 ret = GEN8_LR_CONTEXT_RENDER_SIZE; 2557 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2537,22 +2567,22 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
2537 return ret; 2567 return ret;
2538} 2568}
2539 2569
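intel_lr_context_size() only reports the raw per-engine size; the deferred-allocation hunk below rounds it up to a page and prepends one extra page for the GuC shared area. A standalone sketch of that sizing arithmetic (the byte counts are invented placeholders):

/* Sketch of the sizing arithmetic: round the per-engine context size
 * up to a 4 KiB page, then add the extra shared page. */
#include <stdio.h>

#define PAGE_SIZE 4096u
#define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
    unsigned int ctx = 22 * PAGE_SIZE + 123;        /* not page aligned */
    unsigned int total = ROUND_UP(ctx, PAGE_SIZE);  /* align to a page  */

    total += PAGE_SIZE;                             /* one shared page, as in the hunk below */
    printf("ctx=%u total=%u pages=%u\n", ctx, total, total / PAGE_SIZE);
    return 0;
}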
2540static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, 2570static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
2541 struct drm_i915_gem_object *default_ctx_obj) 2571 struct drm_i915_gem_object *default_ctx_obj)
2542{ 2572{
2543 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2573 struct drm_i915_private *dev_priv = engine->dev->dev_private;
2544 struct page *page; 2574 struct page *page;
2545 2575
2546 /* The HWSP is part of the default context object in LRC mode. */ 2576 /* The HWSP is part of the default context object in LRC mode. */
2547 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj) 2577 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
2548 + LRC_PPHWSP_PN * PAGE_SIZE; 2578 + LRC_PPHWSP_PN * PAGE_SIZE;
2549 page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN); 2579 page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
2550 ring->status_page.page_addr = kmap(page); 2580 engine->status_page.page_addr = kmap(page);
2551 ring->status_page.obj = default_ctx_obj; 2581 engine->status_page.obj = default_ctx_obj;
2552 2582
2553 I915_WRITE(RING_HWS_PGA(ring->mmio_base), 2583 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
2554 (u32)ring->status_page.gfx_addr); 2584 (u32)engine->status_page.gfx_addr);
2555 POSTING_READ(RING_HWS_PGA(ring->mmio_base)); 2585 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
2556} 2586}
2557 2587
2558/** 2588/**
@@ -2570,18 +2600,18 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
2570 */ 2600 */
2571 2601
2572int intel_lr_context_deferred_alloc(struct intel_context *ctx, 2602int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2573 struct intel_engine_cs *ring) 2603 struct intel_engine_cs *engine)
2574{ 2604{
2575 struct drm_device *dev = ring->dev; 2605 struct drm_device *dev = engine->dev;
2576 struct drm_i915_gem_object *ctx_obj; 2606 struct drm_i915_gem_object *ctx_obj;
2577 uint32_t context_size; 2607 uint32_t context_size;
2578 struct intel_ringbuffer *ringbuf; 2608 struct intel_ringbuffer *ringbuf;
2579 int ret; 2609 int ret;
2580 2610
2581 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); 2611 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
2582 WARN_ON(ctx->engine[ring->id].state); 2612 WARN_ON(ctx->engine[engine->id].state);
2583 2613
2584 context_size = round_up(intel_lr_context_size(ring), 4096); 2614 context_size = round_up(intel_lr_context_size(engine), 4096);
2585 2615
2586 /* One extra page as the sharing data between driver and GuC */ 2616 /* One extra page as the sharing data between driver and GuC */
2587 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2617 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2592,32 +2622,32 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2592 return -ENOMEM; 2622 return -ENOMEM;
2593 } 2623 }
2594 2624
2595 ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE); 2625 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
2596 if (IS_ERR(ringbuf)) { 2626 if (IS_ERR(ringbuf)) {
2597 ret = PTR_ERR(ringbuf); 2627 ret = PTR_ERR(ringbuf);
2598 goto error_deref_obj; 2628 goto error_deref_obj;
2599 } 2629 }
2600 2630
2601 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); 2631 ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
2602 if (ret) { 2632 if (ret) {
2603 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); 2633 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2604 goto error_ringbuf; 2634 goto error_ringbuf;
2605 } 2635 }
2606 2636
2607 ctx->engine[ring->id].ringbuf = ringbuf; 2637 ctx->engine[engine->id].ringbuf = ringbuf;
2608 ctx->engine[ring->id].state = ctx_obj; 2638 ctx->engine[engine->id].state = ctx_obj;
2609 2639
2610 if (ctx != ctx->i915->kernel_context && ring->init_context) { 2640 if (ctx != ctx->i915->kernel_context && engine->init_context) {
2611 struct drm_i915_gem_request *req; 2641 struct drm_i915_gem_request *req;
2612 2642
2613 req = i915_gem_request_alloc(ring, ctx); 2643 req = i915_gem_request_alloc(engine, ctx);
2614 if (IS_ERR(req)) { 2644 if (IS_ERR(req)) {
2615 ret = PTR_ERR(req); 2645 ret = PTR_ERR(req);
2616 DRM_ERROR("ring create req: %d\n", ret); 2646 DRM_ERROR("ring create req: %d\n", ret);
2617 goto error_ringbuf; 2647 goto error_ringbuf;
2618 } 2648 }
2619 2649
2620 ret = ring->init_context(req); 2650 ret = engine->init_context(req);
2621 if (ret) { 2651 if (ret) {
2622 DRM_ERROR("ring init context: %d\n", 2652 DRM_ERROR("ring init context: %d\n",
2623 ret); 2653 ret);
@@ -2632,8 +2662,8 @@ error_ringbuf:
2632 intel_ringbuffer_free(ringbuf); 2662 intel_ringbuffer_free(ringbuf);
2633error_deref_obj: 2663error_deref_obj:
2634 drm_gem_object_unreference(&ctx_obj->base); 2664 drm_gem_object_unreference(&ctx_obj->base);
2635 ctx->engine[ring->id].ringbuf = NULL; 2665 ctx->engine[engine->id].ringbuf = NULL;
2636 ctx->engine[ring->id].state = NULL; 2666 ctx->engine[engine->id].state = NULL;
2637 return ret; 2667 return ret;
2638} 2668}
2639 2669
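intel_lr_context_deferred_alloc() is a lazy allocator: per-engine context state is created on first use of a (context, engine) pair and cached in ctx->engine[id]. A minimal userspace model of that caching, with all names invented:

/* Sketch of deferred per-(context, engine) allocation: state is
 * created on first use and cached in a per-engine table. */
#include <stdio.h>
#include <stdlib.h>

#define NUM_ENGINES 5

struct context {
    void *engine_state[NUM_ENGINES];
};

static void *context_get_state(struct context *ctx, int engine_id)
{
    if (!ctx->engine_state[engine_id])              /* first use: allocate */
        ctx->engine_state[engine_id] = calloc(1, 4096);
    return ctx->engine_state[engine_id];
}

int main(void)
{
    struct context ctx = {0};
    void *a = context_get_state(&ctx, 0);
    void *b = context_get_state(&ctx, 0);           /* cached, same pointer */
    printf("same=%d\n", a == b);
    for (int i = 0; i < NUM_ENGINES; i++)
        free(ctx.engine_state[i]);
    return 0;
}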
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index e6cda3e225d0..a17cb12221ba 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -57,8 +57,8 @@
57/* Logical Rings */ 57/* Logical Rings */
58int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); 58int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
59int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); 59int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
60void intel_logical_ring_stop(struct intel_engine_cs *ring); 60void intel_logical_ring_stop(struct intel_engine_cs *engine);
61void intel_logical_ring_cleanup(struct intel_engine_cs *ring); 61void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
62int intel_logical_rings_init(struct drm_device *dev); 62int intel_logical_rings_init(struct drm_device *dev);
63int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords); 63int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
64 64
@@ -98,18 +98,18 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
98#define LRC_STATE_PN (LRC_PPHWSP_PN + 1) 98#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
99 99
100void intel_lr_context_free(struct intel_context *ctx); 100void intel_lr_context_free(struct intel_context *ctx);
101uint32_t intel_lr_context_size(struct intel_engine_cs *ring); 101uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
102int intel_lr_context_deferred_alloc(struct intel_context *ctx, 102int intel_lr_context_deferred_alloc(struct intel_context *ctx,
103 struct intel_engine_cs *ring); 103 struct intel_engine_cs *engine);
104void intel_lr_context_unpin(struct intel_context *ctx, 104void intel_lr_context_unpin(struct intel_context *ctx,
105 struct intel_engine_cs *engine); 105 struct intel_engine_cs *engine);
106void intel_lr_context_reset(struct drm_device *dev, 106void intel_lr_context_reset(struct drm_device *dev,
107 struct intel_context *ctx); 107 struct intel_context *ctx);
108uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 108uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
109 struct intel_engine_cs *ring); 109 struct intel_engine_cs *engine);
110 110
111u32 intel_execlists_ctx_id(struct intel_context *ctx, 111u32 intel_execlists_ctx_id(struct intel_context *ctx,
112 struct intel_engine_cs *ring); 112 struct intel_engine_cs *engine);
113 113
114/* Execlists */ 114/* Execlists */
115int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); 115int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -118,7 +118,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
118 struct drm_i915_gem_execbuffer2 *args, 118 struct drm_i915_gem_execbuffer2 *args,
119 struct list_head *vmas); 119 struct list_head *vmas);
120 120
121void intel_lrc_irq_handler(struct intel_engine_cs *ring); 121void intel_lrc_irq_handler(struct intel_engine_cs *engine);
122void intel_execlists_retire_requests(struct intel_engine_cs *ring); 122void intel_execlists_retire_requests(struct intel_engine_cs *engine);
123 123
124#endif /* _INTEL_LRC_H_ */ 124#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 688773aaa5e5..53237616ce19 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -59,19 +59,19 @@ int intel_ring_space(struct intel_ringbuffer *ringbuf)
59 return ringbuf->space; 59 return ringbuf->space;
60} 60}
61 61
62bool intel_ring_stopped(struct intel_engine_cs *ring) 62bool intel_ring_stopped(struct intel_engine_cs *engine)
63{ 63{
64 struct drm_i915_private *dev_priv = ring->dev->dev_private; 64 struct drm_i915_private *dev_priv = engine->dev->dev_private;
65 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); 65 return dev_priv->gpu_error.stop_rings & intel_ring_flag(engine);
66} 66}
67 67
68static void __intel_ring_advance(struct intel_engine_cs *ring) 68static void __intel_ring_advance(struct intel_engine_cs *engine)
69{ 69{
70 struct intel_ringbuffer *ringbuf = ring->buffer; 70 struct intel_ringbuffer *ringbuf = engine->buffer;
71 ringbuf->tail &= ringbuf->size - 1; 71 ringbuf->tail &= ringbuf->size - 1;
72 if (intel_ring_stopped(ring)) 72 if (intel_ring_stopped(engine))
73 return; 73 return;
74 ring->write_tail(ring, ringbuf->tail); 74 engine->write_tail(engine, ringbuf->tail);
75} 75}
76 76
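__intel_ring_advance() relies on the ring size being a power of two, so the tail wrap is a single mask rather than a modulo. A tiny demonstration of the same trick:

/* Sketch of the power-of-two tail wrap: with size a power of two,
 * "tail &= size - 1" is a cheap modulo. */
#include <stdio.h>

int main(void)
{
    unsigned int size = 4096;                 /* must be a power of two */
    unsigned int tail = 4090;

    for (int i = 0; i < 3; i++) {
        tail += 4;                            /* emit one dword... */
        tail &= size - 1;                     /* ...and wrap */
        printf("tail=%u\n", tail);
    }
    return 0;
}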
77static int 77static int
@@ -429,51 +429,51 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
429 return gen8_emit_pipe_control(req, flags, scratch_addr); 429 return gen8_emit_pipe_control(req, flags, scratch_addr);
430} 430}
431 431
432static void ring_write_tail(struct intel_engine_cs *ring, 432static void ring_write_tail(struct intel_engine_cs *engine,
433 u32 value) 433 u32 value)
434{ 434{
435 struct drm_i915_private *dev_priv = ring->dev->dev_private; 435 struct drm_i915_private *dev_priv = engine->dev->dev_private;
436 I915_WRITE_TAIL(ring, value); 436 I915_WRITE_TAIL(engine, value);
437} 437}
438 438
439u64 intel_ring_get_active_head(struct intel_engine_cs *ring) 439u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
440{ 440{
441 struct drm_i915_private *dev_priv = ring->dev->dev_private; 441 struct drm_i915_private *dev_priv = engine->dev->dev_private;
442 u64 acthd; 442 u64 acthd;
443 443
444 if (INTEL_INFO(ring->dev)->gen >= 8) 444 if (INTEL_INFO(engine->dev)->gen >= 8)
445 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base), 445 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
446 RING_ACTHD_UDW(ring->mmio_base)); 446 RING_ACTHD_UDW(engine->mmio_base));
447 else if (INTEL_INFO(ring->dev)->gen >= 4) 447 else if (INTEL_INFO(engine->dev)->gen >= 4)
448 acthd = I915_READ(RING_ACTHD(ring->mmio_base)); 448 acthd = I915_READ(RING_ACTHD(engine->mmio_base));
449 else 449 else
450 acthd = I915_READ(ACTHD); 450 acthd = I915_READ(ACTHD);
451 451
452 return acthd; 452 return acthd;
453} 453}
454 454
455static void ring_setup_phys_status_page(struct intel_engine_cs *ring) 455static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
456{ 456{
457 struct drm_i915_private *dev_priv = ring->dev->dev_private; 457 struct drm_i915_private *dev_priv = engine->dev->dev_private;
458 u32 addr; 458 u32 addr;
459 459
460 addr = dev_priv->status_page_dmah->busaddr; 460 addr = dev_priv->status_page_dmah->busaddr;
461 if (INTEL_INFO(ring->dev)->gen >= 4) 461 if (INTEL_INFO(engine->dev)->gen >= 4)
462 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 462 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
463 I915_WRITE(HWS_PGA, addr); 463 I915_WRITE(HWS_PGA, addr);
464} 464}
465 465
466static void intel_ring_setup_status_page(struct intel_engine_cs *ring) 466static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
467{ 467{
468 struct drm_device *dev = ring->dev; 468 struct drm_device *dev = engine->dev;
469 struct drm_i915_private *dev_priv = ring->dev->dev_private; 469 struct drm_i915_private *dev_priv = engine->dev->dev_private;
470 i915_reg_t mmio; 470 i915_reg_t mmio;
471 471
472 /* The ring status page addresses are no longer next to the rest of 472 /* The ring status page addresses are no longer next to the rest of
473 * the ring registers as of gen7. 473 * the ring registers as of gen7.
474 */ 474 */
475 if (IS_GEN7(dev)) { 475 if (IS_GEN7(dev)) {
476 switch (ring->id) { 476 switch (engine->id) {
477 case RCS: 477 case RCS:
478 mmio = RENDER_HWS_PGA_GEN7; 478 mmio = RENDER_HWS_PGA_GEN7;
479 break; 479 break;
@@ -492,14 +492,14 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
492 mmio = VEBOX_HWS_PGA_GEN7; 492 mmio = VEBOX_HWS_PGA_GEN7;
493 break; 493 break;
494 } 494 }
495 } else if (IS_GEN6(ring->dev)) { 495 } else if (IS_GEN6(engine->dev)) {
496 mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 496 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
497 } else { 497 } else {
498 /* XXX: gen8 returns to sanity */ 498 /* XXX: gen8 returns to sanity */
499 mmio = RING_HWS_PGA(ring->mmio_base); 499 mmio = RING_HWS_PGA(engine->mmio_base);
500 } 500 }
501 501
502 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); 502 I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
503 POSTING_READ(mmio); 503 POSTING_READ(mmio);
504 504
505 /* 505 /*
@@ -510,10 +510,10 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
510 * invalidating the TLB? 510 * invalidating the TLB?
511 */ 511 */
512 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { 512 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
513 i915_reg_t reg = RING_INSTPM(ring->mmio_base); 513 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
514 514
515 /* ring should be idle before issuing a sync flush */ 515 /* ring should be idle before issuing a sync flush */
516 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); 516 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
517 517
518 I915_WRITE(reg, 518 I915_WRITE(reg,
519 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 519 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -521,117 +521,120 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
521 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 521 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
522 1000)) 522 1000))
523 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", 523 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
524 ring->name); 524 engine->name);
525 } 525 }
526} 526}
527 527
528static bool stop_ring(struct intel_engine_cs *ring) 528static bool stop_ring(struct intel_engine_cs *engine)
529{ 529{
530 struct drm_i915_private *dev_priv = to_i915(ring->dev); 530 struct drm_i915_private *dev_priv = to_i915(engine->dev);
531 531
532 if (!IS_GEN2(ring->dev)) { 532 if (!IS_GEN2(engine->dev)) {
533 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); 533 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
534 if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { 534 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
535 DRM_ERROR("%s : timed out trying to stop ring\n", ring->name); 535 DRM_ERROR("%s : timed out trying to stop ring\n",
536 engine->name);
536 /* Sometimes we observe that the idle flag is not 537 /* Sometimes we observe that the idle flag is not
537 * set even though the ring is empty. So double 538 * set even though the ring is empty. So double
538 * check before giving up. 539 * check before giving up.
539 */ 540 */
540 if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring)) 541 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
541 return false; 542 return false;
542 } 543 }
543 } 544 }
544 545
545 I915_WRITE_CTL(ring, 0); 546 I915_WRITE_CTL(engine, 0);
546 I915_WRITE_HEAD(ring, 0); 547 I915_WRITE_HEAD(engine, 0);
547 ring->write_tail(ring, 0); 548 engine->write_tail(engine, 0);
548 549
549 if (!IS_GEN2(ring->dev)) { 550 if (!IS_GEN2(engine->dev)) {
550 (void)I915_READ_CTL(ring); 551 (void)I915_READ_CTL(engine);
551 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); 552 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
552 } 553 }
553 554
554 return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; 555 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
555} 556}
556 557
557static int init_ring_common(struct intel_engine_cs *ring) 558static int init_ring_common(struct intel_engine_cs *engine)
558{ 559{
559 struct drm_device *dev = ring->dev; 560 struct drm_device *dev = engine->dev;
560 struct drm_i915_private *dev_priv = dev->dev_private; 561 struct drm_i915_private *dev_priv = dev->dev_private;
561 struct intel_ringbuffer *ringbuf = ring->buffer; 562 struct intel_ringbuffer *ringbuf = engine->buffer;
562 struct drm_i915_gem_object *obj = ringbuf->obj; 563 struct drm_i915_gem_object *obj = ringbuf->obj;
563 int ret = 0; 564 int ret = 0;
564 565
565 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 566 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
566 567
567 if (!stop_ring(ring)) { 568 if (!stop_ring(engine)) {
568 /* G45 ring initialization often fails to reset head to zero */ 569 /* G45 ring initialization often fails to reset head to zero */
569 DRM_DEBUG_KMS("%s head not reset to zero " 570 DRM_DEBUG_KMS("%s head not reset to zero "
570 "ctl %08x head %08x tail %08x start %08x\n", 571 "ctl %08x head %08x tail %08x start %08x\n",
571 ring->name, 572 engine->name,
572 I915_READ_CTL(ring), 573 I915_READ_CTL(engine),
573 I915_READ_HEAD(ring), 574 I915_READ_HEAD(engine),
574 I915_READ_TAIL(ring), 575 I915_READ_TAIL(engine),
575 I915_READ_START(ring)); 576 I915_READ_START(engine));
576 577
577 if (!stop_ring(ring)) { 578 if (!stop_ring(engine)) {
578 DRM_ERROR("failed to set %s head to zero " 579 DRM_ERROR("failed to set %s head to zero "
579 "ctl %08x head %08x tail %08x start %08x\n", 580 "ctl %08x head %08x tail %08x start %08x\n",
580 ring->name, 581 engine->name,
581 I915_READ_CTL(ring), 582 I915_READ_CTL(engine),
582 I915_READ_HEAD(ring), 583 I915_READ_HEAD(engine),
583 I915_READ_TAIL(ring), 584 I915_READ_TAIL(engine),
584 I915_READ_START(ring)); 585 I915_READ_START(engine));
585 ret = -EIO; 586 ret = -EIO;
586 goto out; 587 goto out;
587 } 588 }
588 } 589 }
589 590
590 if (I915_NEED_GFX_HWS(dev)) 591 if (I915_NEED_GFX_HWS(dev))
591 intel_ring_setup_status_page(ring); 592 intel_ring_setup_status_page(engine);
592 else 593 else
593 ring_setup_phys_status_page(ring); 594 ring_setup_phys_status_page(engine);
594 595
595 /* Enforce ordering by reading HEAD register back */ 596 /* Enforce ordering by reading HEAD register back */
596 I915_READ_HEAD(ring); 597 I915_READ_HEAD(engine);
597 598
598 /* Initialize the ring. This must happen _after_ we've cleared the ring 599 /* Initialize the ring. This must happen _after_ we've cleared the ring
599 * registers with the above sequence (the readback of the HEAD registers 600 * registers with the above sequence (the readback of the HEAD registers
600 * also enforces ordering), otherwise the hw might lose the new ring 601 * also enforces ordering), otherwise the hw might lose the new ring
601 * register values. */ 602 * register values. */
602 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); 603 I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
603 604
604 /* WaClearRingBufHeadRegAtInit:ctg,elk */ 605 /* WaClearRingBufHeadRegAtInit:ctg,elk */
605 if (I915_READ_HEAD(ring)) 606 if (I915_READ_HEAD(engine))
606 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n", 607 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
607 ring->name, I915_READ_HEAD(ring)); 608 engine->name, I915_READ_HEAD(engine));
608 I915_WRITE_HEAD(ring, 0); 609 I915_WRITE_HEAD(engine, 0);
609 (void)I915_READ_HEAD(ring); 610 (void)I915_READ_HEAD(engine);
610 611
611 I915_WRITE_CTL(ring, 612 I915_WRITE_CTL(engine,
612 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) 613 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
613 | RING_VALID); 614 | RING_VALID);
614 615
615 /* If the head is still not zero, the ring is dead */ 616 /* If the head is still not zero, the ring is dead */
616 if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && 617 if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
617 I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && 618 I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
618 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { 619 (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
619 DRM_ERROR("%s initialization failed " 620 DRM_ERROR("%s initialization failed "
620 "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n", 621 "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
621 ring->name, 622 engine->name,
622 I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID, 623 I915_READ_CTL(engine),
623 I915_READ_HEAD(ring), I915_READ_TAIL(ring), 624 I915_READ_CTL(engine) & RING_VALID,
624 I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj)); 625 I915_READ_HEAD(engine), I915_READ_TAIL(engine),
626 I915_READ_START(engine),
627 (unsigned long)i915_gem_obj_ggtt_offset(obj));
625 ret = -EIO; 628 ret = -EIO;
626 goto out; 629 goto out;
627 } 630 }
628 631
629 ringbuf->last_retired_head = -1; 632 ringbuf->last_retired_head = -1;
630 ringbuf->head = I915_READ_HEAD(ring); 633 ringbuf->head = I915_READ_HEAD(engine);
631 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 634 ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
632 intel_ring_update_space(ringbuf); 635 intel_ring_update_space(ringbuf);
633 636
634 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 637 memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
635 638
636out: 639out:
637 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 640 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -640,59 +643,60 @@ out:
640} 643}
641 644
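init_ring_common() repeatedly reads registers back after writing them (the HEAD readback, the (void) casts) so that earlier writes are posted before later ones take effect. The sketch below only models the idea with a volatile array; real MMIO ordering needs the kernel's accessors and barriers:

/* Sketch of the readback-as-barrier idiom, modeled with a volatile
 * "register file". This is an illustration, not real MMIO code. */
#include <stdio.h>

static volatile unsigned int regs[4];

enum { HEAD = 0, TAIL = 1, START = 2, CTL = 3 };

int main(void)
{
    regs[START] = 0x1000;
    (void)regs[HEAD];       /* posting read: keep START before CTL */
    regs[CTL] = 0x1;
    printf("start=0x%x ctl=0x%x\n", regs[START], regs[CTL]);
    return 0;
}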
642void 645void
643intel_fini_pipe_control(struct intel_engine_cs *ring) 646intel_fini_pipe_control(struct intel_engine_cs *engine)
644{ 647{
645 struct drm_device *dev = ring->dev; 648 struct drm_device *dev = engine->dev;
646 649
647 if (ring->scratch.obj == NULL) 650 if (engine->scratch.obj == NULL)
648 return; 651 return;
649 652
650 if (INTEL_INFO(dev)->gen >= 5) { 653 if (INTEL_INFO(dev)->gen >= 5) {
651 kunmap(sg_page(ring->scratch.obj->pages->sgl)); 654 kunmap(sg_page(engine->scratch.obj->pages->sgl));
652 i915_gem_object_ggtt_unpin(ring->scratch.obj); 655 i915_gem_object_ggtt_unpin(engine->scratch.obj);
653 } 656 }
654 657
655 drm_gem_object_unreference(&ring->scratch.obj->base); 658 drm_gem_object_unreference(&engine->scratch.obj->base);
656 ring->scratch.obj = NULL; 659 engine->scratch.obj = NULL;
657} 660}
658 661
659int 662int
660intel_init_pipe_control(struct intel_engine_cs *ring) 663intel_init_pipe_control(struct intel_engine_cs *engine)
661{ 664{
662 int ret; 665 int ret;
663 666
664 WARN_ON(ring->scratch.obj); 667 WARN_ON(engine->scratch.obj);
665 668
666 ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); 669 engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
667 if (ring->scratch.obj == NULL) { 670 if (engine->scratch.obj == NULL) {
668 DRM_ERROR("Failed to allocate seqno page\n"); 671 DRM_ERROR("Failed to allocate seqno page\n");
669 ret = -ENOMEM; 672 ret = -ENOMEM;
670 goto err; 673 goto err;
671 } 674 }
672 675
673 ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); 676 ret = i915_gem_object_set_cache_level(engine->scratch.obj,
677 I915_CACHE_LLC);
674 if (ret) 678 if (ret)
675 goto err_unref; 679 goto err_unref;
676 680
677 ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0); 681 ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
678 if (ret) 682 if (ret)
679 goto err_unref; 683 goto err_unref;
680 684
681 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); 685 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
682 ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl)); 686 engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
683 if (ring->scratch.cpu_page == NULL) { 687 if (engine->scratch.cpu_page == NULL) {
684 ret = -ENOMEM; 688 ret = -ENOMEM;
685 goto err_unpin; 689 goto err_unpin;
686 } 690 }
687 691
688 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 692 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
689 ring->name, ring->scratch.gtt_offset); 693 engine->name, engine->scratch.gtt_offset);
690 return 0; 694 return 0;
691 695
692err_unpin: 696err_unpin:
693 i915_gem_object_ggtt_unpin(ring->scratch.obj); 697 i915_gem_object_ggtt_unpin(engine->scratch.obj);
694err_unref: 698err_unref:
695 drm_gem_object_unreference(&ring->scratch.obj->base); 699 drm_gem_object_unreference(&engine->scratch.obj->base);
696err: 700err:
697 return ret; 701 return ret;
698} 702}
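intel_init_pipe_control() uses the classic layered unwind ladder: each failure jumps to the label that undoes exactly the steps that already succeeded, in reverse order. A self-contained sketch with invented stand-ins for the scratch object and its mapping:

/* Sketch of the layered unwind ladder (err_unpin/err_unref/err). */
#include <stdio.h>
#include <stdlib.h>

static int init_scratch(void **obj, void **map)
{
    int ret;

    *obj = malloc(4096);              /* step 1: allocate */
    if (!*obj) { ret = -1; goto err; }

    *map = malloc(64);                /* step 2: map (stand-in for pin + kmap) */
    if (!*map) { ret = -1; goto err_unref; }

    return 0;

err_unref:
    free(*obj);                       /* undo step 1 only */
    *obj = NULL;
err:
    return ret;
}

int main(void)
{
    void *obj = NULL, *map = NULL;
    printf("%d\n", init_scratch(&obj, &map));
    free(map);
    free(obj);
    return 0;
}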
@@ -789,25 +793,26 @@ static int wa_add(struct drm_i915_private *dev_priv,
789 793
790#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val) 794#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
791 795
792static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg) 796static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
797 i915_reg_t reg)
793{ 798{
794 struct drm_i915_private *dev_priv = ring->dev->dev_private; 799 struct drm_i915_private *dev_priv = engine->dev->dev_private;
795 struct i915_workarounds *wa = &dev_priv->workarounds; 800 struct i915_workarounds *wa = &dev_priv->workarounds;
796 const uint32_t index = wa->hw_whitelist_count[ring->id]; 801 const uint32_t index = wa->hw_whitelist_count[engine->id];
797 802
798 if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS)) 803 if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
799 return -EINVAL; 804 return -EINVAL;
800 805
801 WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index), 806 WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
802 i915_mmio_reg_offset(reg)); 807 i915_mmio_reg_offset(reg));
803 wa->hw_whitelist_count[ring->id]++; 808 wa->hw_whitelist_count[engine->id]++;
804 809
805 return 0; 810 return 0;
806} 811}
807 812
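wa_ring_whitelist_reg() guards a fixed pool of hardware whitelist slots with a per-engine counter and fails hard once the pool is exhausted. A compact model of that bounded registration (the slot count is invented):

/* Sketch of bounded-slot registration: a fixed number of slots, a
 * running index, and a hard failure once the slots run out. */
#include <stdio.h>

#define MAX_SLOTS 12

static int whitelist_count;

static int whitelist_reg(unsigned int reg)
{
    (void)reg;
    if (whitelist_count >= MAX_SLOTS)
        return -1;                    /* no free hardware slot left */
    /* real code programs the FORCE_TO_NONPRIV slot 'whitelist_count' here */
    whitelist_count++;
    return 0;
}

int main(void)
{
    for (int i = 0; i < MAX_SLOTS + 1; i++)
        if (whitelist_reg(0x2000 + 4 * i))
            printf("slot %d rejected\n", i);
    return 0;
}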
808static int gen8_init_workarounds(struct intel_engine_cs *ring) 813static int gen8_init_workarounds(struct intel_engine_cs *engine)
809{ 814{
810 struct drm_device *dev = ring->dev; 815 struct drm_device *dev = engine->dev;
811 struct drm_i915_private *dev_priv = dev->dev_private; 816 struct drm_i915_private *dev_priv = dev->dev_private;
812 817
813 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); 818 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
@@ -857,13 +862,13 @@ static int gen8_init_workarounds(struct intel_engine_cs *ring)
857 return 0; 862 return 0;
858} 863}
859 864
860static int bdw_init_workarounds(struct intel_engine_cs *ring) 865static int bdw_init_workarounds(struct intel_engine_cs *engine)
861{ 866{
862 int ret; 867 int ret;
863 struct drm_device *dev = ring->dev; 868 struct drm_device *dev = engine->dev;
864 struct drm_i915_private *dev_priv = dev->dev_private; 869 struct drm_i915_private *dev_priv = dev->dev_private;
865 870
866 ret = gen8_init_workarounds(ring); 871 ret = gen8_init_workarounds(engine);
867 if (ret) 872 if (ret)
868 return ret; 873 return ret;
869 874
@@ -886,13 +891,13 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
886 return 0; 891 return 0;
887} 892}
888 893
889static int chv_init_workarounds(struct intel_engine_cs *ring) 894static int chv_init_workarounds(struct intel_engine_cs *engine)
890{ 895{
891 int ret; 896 int ret;
892 struct drm_device *dev = ring->dev; 897 struct drm_device *dev = engine->dev;
893 struct drm_i915_private *dev_priv = dev->dev_private; 898 struct drm_i915_private *dev_priv = dev->dev_private;
894 899
895 ret = gen8_init_workarounds(ring); 900 ret = gen8_init_workarounds(engine);
896 if (ret) 901 if (ret)
897 return ret; 902 return ret;
898 903
@@ -905,9 +910,9 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
905 return 0; 910 return 0;
906} 911}
907 912
908static int gen9_init_workarounds(struct intel_engine_cs *ring) 913static int gen9_init_workarounds(struct intel_engine_cs *engine)
909{ 914{
910 struct drm_device *dev = ring->dev; 915 struct drm_device *dev = engine->dev;
911 struct drm_i915_private *dev_priv = dev->dev_private; 916 struct drm_i915_private *dev_priv = dev->dev_private;
912 uint32_t tmp; 917 uint32_t tmp;
913 int ret; 918 int ret;
@@ -986,21 +991,21 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
986 GEN8_LQSC_FLUSH_COHERENT_LINES)); 991 GEN8_LQSC_FLUSH_COHERENT_LINES));
987 992
988 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */ 993 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
989 ret = wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1); 994 ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
990 if (ret) 995 if (ret)
991 return ret; 996 return ret;
992 997
993 /* WaAllowUMDToModifyHDCChicken1:skl,bxt */ 998 /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
994 ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1); 999 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
995 if (ret) 1000 if (ret)
996 return ret; 1001 return ret;
997 1002
998 return 0; 1003 return 0;
999} 1004}
1000 1005
1001static int skl_tune_iz_hashing(struct intel_engine_cs *ring) 1006static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
1002{ 1007{
1003 struct drm_device *dev = ring->dev; 1008 struct drm_device *dev = engine->dev;
1004 struct drm_i915_private *dev_priv = dev->dev_private; 1009 struct drm_i915_private *dev_priv = dev->dev_private;
1005 u8 vals[3] = { 0, 0, 0 }; 1010 u8 vals[3] = { 0, 0, 0 };
1006 unsigned int i; 1011 unsigned int i;
@@ -1040,13 +1045,13 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
1040 return 0; 1045 return 0;
1041} 1046}
1042 1047
1043static int skl_init_workarounds(struct intel_engine_cs *ring) 1048static int skl_init_workarounds(struct intel_engine_cs *engine)
1044{ 1049{
1045 int ret; 1050 int ret;
1046 struct drm_device *dev = ring->dev; 1051 struct drm_device *dev = engine->dev;
1047 struct drm_i915_private *dev_priv = dev->dev_private; 1052 struct drm_i915_private *dev_priv = dev->dev_private;
1048 1053
1049 ret = gen9_init_workarounds(ring); 1054 ret = gen9_init_workarounds(engine);
1050 if (ret) 1055 if (ret)
1051 return ret; 1056 return ret;
1052 1057
@@ -1113,20 +1118,20 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1113 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1118 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1114 1119
1115 /* WaDisableLSQCROPERFforOCL:skl */ 1120 /* WaDisableLSQCROPERFforOCL:skl */
1116 ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4); 1121 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1117 if (ret) 1122 if (ret)
1118 return ret; 1123 return ret;
1119 1124
1120 return skl_tune_iz_hashing(ring); 1125 return skl_tune_iz_hashing(engine);
1121} 1126}
1122 1127
1123static int bxt_init_workarounds(struct intel_engine_cs *ring) 1128static int bxt_init_workarounds(struct intel_engine_cs *engine)
1124{ 1129{
1125 int ret; 1130 int ret;
1126 struct drm_device *dev = ring->dev; 1131 struct drm_device *dev = engine->dev;
1127 struct drm_i915_private *dev_priv = dev->dev_private; 1132 struct drm_i915_private *dev_priv = dev->dev_private;
1128 1133
1129 ret = gen9_init_workarounds(ring); 1134 ret = gen9_init_workarounds(engine);
1130 if (ret) 1135 if (ret)
1131 return ret; 1136 return ret;
1132 1137
@@ -1157,11 +1162,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
1157 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ 1162 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
1158 /* WaDisableLSQCROPERFforOCL:bxt */ 1163 /* WaDisableLSQCROPERFforOCL:bxt */
1159 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1164 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
1160 ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1); 1165 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
1161 if (ret) 1166 if (ret)
1162 return ret; 1167 return ret;
1163 1168
1164 ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4); 1169 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1165 if (ret) 1170 if (ret)
1166 return ret; 1171 return ret;
1167 } 1172 }
@@ -1169,36 +1174,36 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
1169 return 0; 1174 return 0;
1170} 1175}
1171 1176
1172int init_workarounds_ring(struct intel_engine_cs *ring) 1177int init_workarounds_ring(struct intel_engine_cs *engine)
1173{ 1178{
1174 struct drm_device *dev = ring->dev; 1179 struct drm_device *dev = engine->dev;
1175 struct drm_i915_private *dev_priv = dev->dev_private; 1180 struct drm_i915_private *dev_priv = dev->dev_private;
1176 1181
1177 WARN_ON(ring->id != RCS); 1182 WARN_ON(engine->id != RCS);
1178 1183
1179 dev_priv->workarounds.count = 0; 1184 dev_priv->workarounds.count = 0;
1180 dev_priv->workarounds.hw_whitelist_count[RCS] = 0; 1185 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
1181 1186
1182 if (IS_BROADWELL(dev)) 1187 if (IS_BROADWELL(dev))
1183 return bdw_init_workarounds(ring); 1188 return bdw_init_workarounds(engine);
1184 1189
1185 if (IS_CHERRYVIEW(dev)) 1190 if (IS_CHERRYVIEW(dev))
1186 return chv_init_workarounds(ring); 1191 return chv_init_workarounds(engine);
1187 1192
1188 if (IS_SKYLAKE(dev)) 1193 if (IS_SKYLAKE(dev))
1189 return skl_init_workarounds(ring); 1194 return skl_init_workarounds(engine);
1190 1195
1191 if (IS_BROXTON(dev)) 1196 if (IS_BROXTON(dev))
1192 return bxt_init_workarounds(ring); 1197 return bxt_init_workarounds(engine);
1193 1198
1194 return 0; 1199 return 0;
1195} 1200}
1196 1201
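init_workarounds_ring() dispatches to a per-platform routine, and each of those (see the bdw/chv/skl/bxt hunks above) first applies the generation-common set before layering its own extras. A minimal sketch of that layering, with invented platforms:

/* Sketch of layered workaround init: common set first, then the
 * platform-specific extras. */
#include <stdio.h>

static int gen_common_workarounds(void)
{
    printf("common workarounds\n");
    return 0;
}

static int platform_a_workarounds(void)
{
    int ret = gen_common_workarounds();   /* common set first */
    if (ret)
        return ret;
    printf("platform A extras\n");        /* then platform-specific ones */
    return 0;
}

int main(void)
{
    return platform_a_workarounds();
}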
1197static int init_render_ring(struct intel_engine_cs *ring) 1202static int init_render_ring(struct intel_engine_cs *engine)
1198{ 1203{
1199 struct drm_device *dev = ring->dev; 1204 struct drm_device *dev = engine->dev;
1200 struct drm_i915_private *dev_priv = dev->dev_private; 1205 struct drm_i915_private *dev_priv = dev->dev_private;
1201 int ret = init_ring_common(ring); 1206 int ret = init_ring_common(engine);
1202 if (ret) 1207 if (ret)
1203 return ret; 1208 return ret;
1204 1209
@@ -1241,14 +1246,14 @@ static int init_render_ring(struct intel_engine_cs *ring)
1241 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1246 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1242 1247
1243 if (HAS_L3_DPF(dev)) 1248 if (HAS_L3_DPF(dev))
1244 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 1249 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
1245 1250
1246 return init_workarounds_ring(ring); 1251 return init_workarounds_ring(engine);
1247} 1252}
1248 1253
1249static void render_ring_cleanup(struct intel_engine_cs *ring) 1254static void render_ring_cleanup(struct intel_engine_cs *engine)
1250{ 1255{
1251 struct drm_device *dev = ring->dev; 1256 struct drm_device *dev = engine->dev;
1252 struct drm_i915_private *dev_priv = dev->dev_private; 1257 struct drm_i915_private *dev_priv = dev->dev_private;
1253 1258
1254 if (dev_priv->semaphore_obj) { 1259 if (dev_priv->semaphore_obj) {
@@ -1257,7 +1262,7 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
1257 dev_priv->semaphore_obj = NULL; 1262 dev_priv->semaphore_obj = NULL;
1258 } 1263 }
1259 1264
1260 intel_fini_pipe_control(ring); 1265 intel_fini_pipe_control(engine);
1261} 1266}
1262 1267
1263static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, 1268static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
@@ -1554,47 +1559,47 @@ pc_render_add_request(struct drm_i915_gem_request *req)
1554} 1559}
1555 1560
1556static u32 1561static u32
1557gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 1562gen6_ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
1558{ 1563{
1559 /* Workaround to force correct ordering between irq and seqno writes on 1564 /* Workaround to force correct ordering between irq and seqno writes on
1560 * ivb (and maybe also on snb) by reading from a CS register (like 1565 * ivb (and maybe also on snb) by reading from a CS register (like
1561 * ACTHD) before reading the status page. */ 1566 * ACTHD) before reading the status page. */
1562 if (!lazy_coherency) { 1567 if (!lazy_coherency) {
1563 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1568 struct drm_i915_private *dev_priv = engine->dev->dev_private;
1564 POSTING_READ(RING_ACTHD(ring->mmio_base)); 1569 POSTING_READ(RING_ACTHD(engine->mmio_base));
1565 } 1570 }
1566 1571
1567 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 1572 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1568} 1573}
1569 1574
1570static u32 1575static u32
1571ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 1576ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
1572{ 1577{
1573 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 1578 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1574} 1579}
1575 1580
1576static void 1581static void
1577ring_set_seqno(struct intel_engine_cs *ring, u32 seqno) 1582ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1578{ 1583{
1579 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); 1584 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1580} 1585}
1581 1586
1582static u32 1587static u32
1583pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 1588pc_render_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
1584{ 1589{
1585 return ring->scratch.cpu_page[0]; 1590 return engine->scratch.cpu_page[0];
1586} 1591}
1587 1592
1588static void 1593static void
1589pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno) 1594pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1590{ 1595{
1591 ring->scratch.cpu_page[0] = seqno; 1596 engine->scratch.cpu_page[0] = seqno;
1592} 1597}
1593 1598
1594static bool 1599static bool
1595gen5_ring_get_irq(struct intel_engine_cs *ring) 1600gen5_ring_get_irq(struct intel_engine_cs *engine)
1596{ 1601{
1597 struct drm_device *dev = ring->dev; 1602 struct drm_device *dev = engine->dev;
1598 struct drm_i915_private *dev_priv = dev->dev_private; 1603 struct drm_i915_private *dev_priv = dev->dev_private;
1599 unsigned long flags; 1604 unsigned long flags;
1600 1605
@@ -1602,30 +1607,30 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
1602 return false; 1607 return false;
1603 1608
1604 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1609 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1605 if (ring->irq_refcount++ == 0) 1610 if (engine->irq_refcount++ == 0)
1606 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1611 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1607 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1612 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1608 1613
1609 return true; 1614 return true;
1610} 1615}
1611 1616
1612static void 1617static void
1613gen5_ring_put_irq(struct intel_engine_cs *ring) 1618gen5_ring_put_irq(struct intel_engine_cs *engine)
1614{ 1619{
1615 struct drm_device *dev = ring->dev; 1620 struct drm_device *dev = engine->dev;
1616 struct drm_i915_private *dev_priv = dev->dev_private; 1621 struct drm_i915_private *dev_priv = dev->dev_private;
1617 unsigned long flags; 1622 unsigned long flags;
1618 1623
1619 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1624 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1620 if (--ring->irq_refcount == 0) 1625 if (--engine->irq_refcount == 0)
1621 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1626 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1622 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1627 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1623} 1628}
1624 1629
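The gen5 hooks above, and the i9xx/i8xx/gen6 variants below, all share one refcounting scheme: unmask the interrupt source on the 0 to 1 transition, mask it again on 1 to 0, with both transitions under the irq lock. A userspace sketch of the same discipline, with a pthread mutex standing in for the spinlock:

/* Sketch of get/put interrupt refcounting: enable on 0 -> 1,
 * disable on 1 -> 0, both under one lock. */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int irq_refcount;
static int irq_enabled;

static void irq_get(void)
{
    pthread_mutex_lock(&irq_lock);
    if (irq_refcount++ == 0)
        irq_enabled = 1;              /* real code: unmask the bit in IMR */
    pthread_mutex_unlock(&irq_lock);
}

static void irq_put(void)
{
    pthread_mutex_lock(&irq_lock);
    if (--irq_refcount == 0)
        irq_enabled = 0;              /* real code: mask the bit in IMR */
    pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
    irq_get();
    irq_get();
    irq_put();
    printf("enabled=%d refcount=%u\n", irq_enabled, irq_refcount);
    irq_put();
    printf("enabled=%d refcount=%u\n", irq_enabled, irq_refcount);
    return 0;
}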
1625static bool 1630static bool
1626i9xx_ring_get_irq(struct intel_engine_cs *ring) 1631i9xx_ring_get_irq(struct intel_engine_cs *engine)
1627{ 1632{
1628 struct drm_device *dev = ring->dev; 1633 struct drm_device *dev = engine->dev;
1629 struct drm_i915_private *dev_priv = dev->dev_private; 1634 struct drm_i915_private *dev_priv = dev->dev_private;
1630 unsigned long flags; 1635 unsigned long flags;
1631 1636
@@ -1633,8 +1638,8 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
1633 return false; 1638 return false;
1634 1639
1635 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1640 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1636 if (ring->irq_refcount++ == 0) { 1641 if (engine->irq_refcount++ == 0) {
1637 dev_priv->irq_mask &= ~ring->irq_enable_mask; 1642 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1638 I915_WRITE(IMR, dev_priv->irq_mask); 1643 I915_WRITE(IMR, dev_priv->irq_mask);
1639 POSTING_READ(IMR); 1644 POSTING_READ(IMR);
1640 } 1645 }
@@ -1644,15 +1649,15 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
1644} 1649}
1645 1650
1646static void 1651static void
1647i9xx_ring_put_irq(struct intel_engine_cs *ring) 1652i9xx_ring_put_irq(struct intel_engine_cs *engine)
1648{ 1653{
1649 struct drm_device *dev = ring->dev; 1654 struct drm_device *dev = engine->dev;
1650 struct drm_i915_private *dev_priv = dev->dev_private; 1655 struct drm_i915_private *dev_priv = dev->dev_private;
1651 unsigned long flags; 1656 unsigned long flags;
1652 1657
1653 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1658 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1654 if (--ring->irq_refcount == 0) { 1659 if (--engine->irq_refcount == 0) {
1655 dev_priv->irq_mask |= ring->irq_enable_mask; 1660 dev_priv->irq_mask |= engine->irq_enable_mask;
1656 I915_WRITE(IMR, dev_priv->irq_mask); 1661 I915_WRITE(IMR, dev_priv->irq_mask);
1657 POSTING_READ(IMR); 1662 POSTING_READ(IMR);
1658 } 1663 }
@@ -1660,9 +1665,9 @@ i9xx_ring_put_irq(struct intel_engine_cs *ring)
1660} 1665}
1661 1666
1662static bool 1667static bool
1663i8xx_ring_get_irq(struct intel_engine_cs *ring) 1668i8xx_ring_get_irq(struct intel_engine_cs *engine)
1664{ 1669{
1665 struct drm_device *dev = ring->dev; 1670 struct drm_device *dev = engine->dev;
1666 struct drm_i915_private *dev_priv = dev->dev_private; 1671 struct drm_i915_private *dev_priv = dev->dev_private;
1667 unsigned long flags; 1672 unsigned long flags;
1668 1673
@@ -1670,8 +1675,8 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
1670 return false; 1675 return false;
1671 1676
1672 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1677 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1673 if (ring->irq_refcount++ == 0) { 1678 if (engine->irq_refcount++ == 0) {
1674 dev_priv->irq_mask &= ~ring->irq_enable_mask; 1679 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1675 I915_WRITE16(IMR, dev_priv->irq_mask); 1680 I915_WRITE16(IMR, dev_priv->irq_mask);
1676 POSTING_READ16(IMR); 1681 POSTING_READ16(IMR);
1677 } 1682 }
@@ -1681,15 +1686,15 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
1681} 1686}
1682 1687
1683static void 1688static void
1684i8xx_ring_put_irq(struct intel_engine_cs *ring) 1689i8xx_ring_put_irq(struct intel_engine_cs *engine)
1685{ 1690{
1686 struct drm_device *dev = ring->dev; 1691 struct drm_device *dev = engine->dev;
1687 struct drm_i915_private *dev_priv = dev->dev_private; 1692 struct drm_i915_private *dev_priv = dev->dev_private;
1688 unsigned long flags; 1693 unsigned long flags;
1689 1694
1690 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1695 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1691 if (--ring->irq_refcount == 0) { 1696 if (--engine->irq_refcount == 0) {
1692 dev_priv->irq_mask |= ring->irq_enable_mask; 1697 dev_priv->irq_mask |= engine->irq_enable_mask;
1693 I915_WRITE16(IMR, dev_priv->irq_mask); 1698 I915_WRITE16(IMR, dev_priv->irq_mask);
1694 POSTING_READ16(IMR); 1699 POSTING_READ16(IMR);
1695 } 1700 }
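
The i9xx and i8xx variants above differ only in register width (I915_WRITE/POSTING_READ versus I915_WRITE16/POSTING_READ16); the pattern they share is a refcounted unmask under irq_lock, so only the 0 -> 1 and 1 -> 0 transitions touch the hardware. A minimal userspace sketch of that pattern, with made-up names (fake_engine, write_imr) standing in for the driver types:

#include <stdint.h>
#include <stdio.h>

struct fake_engine {
	unsigned int irq_refcount;	/* the driver guards this with irq_lock */
	uint32_t irq_enable_mask;
};

static uint32_t imr = ~0u;		/* stands in for dev_priv->irq_mask */

static void write_imr(uint32_t v)	/* I915_WRITE(IMR, v); POSTING_READ(IMR) */
{
	imr = v;
	printf("IMR = 0x%08x\n", imr);
}

static void engine_get_irq(struct fake_engine *e)
{
	if (e->irq_refcount++ == 0)	/* 0 -> 1: unmask this engine's bit */
		write_imr(imr & ~e->irq_enable_mask);
}

static void engine_put_irq(struct fake_engine *e)
{
	if (--e->irq_refcount == 0)	/* 1 -> 0: mask it again */
		write_imr(imr | e->irq_enable_mask);
}

int main(void)
{
	struct fake_engine e = { .irq_enable_mask = 1u << 2 };

	engine_get_irq(&e);		/* writes IMR */
	engine_get_irq(&e);		/* nested get: no register traffic */
	engine_put_irq(&e);		/* still one holder: no traffic */
	engine_put_irq(&e);		/* writes IMR */
	return 0;
}
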
@@ -1735,9 +1740,9 @@ i9xx_add_request(struct drm_i915_gem_request *req)
1735} 1740}
1736 1741
1737static bool 1742static bool
1738gen6_ring_get_irq(struct intel_engine_cs *ring) 1743gen6_ring_get_irq(struct intel_engine_cs *engine)
1739{ 1744{
1740 struct drm_device *dev = ring->dev; 1745 struct drm_device *dev = engine->dev;
1741 struct drm_i915_private *dev_priv = dev->dev_private; 1746 struct drm_i915_private *dev_priv = dev->dev_private;
1742 unsigned long flags; 1747 unsigned long flags;
1743 1748
@@ -1745,14 +1750,14 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
1745 return false; 1750 return false;
1746 1751
1747 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1752 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1748 if (ring->irq_refcount++ == 0) { 1753 if (engine->irq_refcount++ == 0) {
1749 if (HAS_L3_DPF(dev) && ring->id == RCS) 1754 if (HAS_L3_DPF(dev) && engine->id == RCS)
1750 I915_WRITE_IMR(ring, 1755 I915_WRITE_IMR(engine,
1751 ~(ring->irq_enable_mask | 1756 ~(engine->irq_enable_mask |
1752 GT_PARITY_ERROR(dev))); 1757 GT_PARITY_ERROR(dev)));
1753 else 1758 else
1754 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1759 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1755 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1760 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1756 } 1761 }
1757 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1762 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1758 1763
@@ -1760,27 +1765,27 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
1760} 1765}
1761 1766
1762static void 1767static void
1763gen6_ring_put_irq(struct intel_engine_cs *ring) 1768gen6_ring_put_irq(struct intel_engine_cs *engine)
1764{ 1769{
1765 struct drm_device *dev = ring->dev; 1770 struct drm_device *dev = engine->dev;
1766 struct drm_i915_private *dev_priv = dev->dev_private; 1771 struct drm_i915_private *dev_priv = dev->dev_private;
1767 unsigned long flags; 1772 unsigned long flags;
1768 1773
1769 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1774 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1770 if (--ring->irq_refcount == 0) { 1775 if (--engine->irq_refcount == 0) {
1771 if (HAS_L3_DPF(dev) && ring->id == RCS) 1776 if (HAS_L3_DPF(dev) && engine->id == RCS)
1772 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 1777 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
1773 else 1778 else
1774 I915_WRITE_IMR(ring, ~0); 1779 I915_WRITE_IMR(engine, ~0);
1775 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1780 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1776 } 1781 }
1777 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1782 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1778} 1783}
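
The gen6 twist is the HAS_L3_DPF branch: on the render ring the L3 parity-error interrupt has to stay unmasked even while no one holds a user-interrupt reference, so the put path writes ~GT_PARITY_ERROR(dev) rather than ~0. A sketch of just that IMR-value computation (PARITY_BITS is a placeholder bit, not the real mask):

#include <stdint.h>
#include <stdbool.h>

#define PARITY_BITS (1u << 5)	/* stands in for GT_PARITY_ERROR(dev) */

/* is_rcs_with_l3 models "HAS_L3_DPF(dev) && engine->id == RCS". */
static uint32_t imr_on_get(uint32_t irq_enable_mask, bool is_rcs_with_l3)
{
	uint32_t unmask = irq_enable_mask;

	if (is_rcs_with_l3)
		unmask |= PARITY_BITS;	/* keep parity reporting live too */
	return ~unmask;			/* IMR: 1 = masked, 0 = unmasked */
}

static uint32_t imr_on_put(bool is_rcs_with_l3)
{
	/* Dropping the last reference masks everything except, on RCS
	 * with L3 DPF, the parity-error interrupt. */
	return is_rcs_with_l3 ? ~PARITY_BITS : ~0u;
}
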
1779 1784
1780static bool 1785static bool
1781hsw_vebox_get_irq(struct intel_engine_cs *ring) 1786hsw_vebox_get_irq(struct intel_engine_cs *engine)
1782{ 1787{
1783 struct drm_device *dev = ring->dev; 1788 struct drm_device *dev = engine->dev;
1784 struct drm_i915_private *dev_priv = dev->dev_private; 1789 struct drm_i915_private *dev_priv = dev->dev_private;
1785 unsigned long flags; 1790 unsigned long flags;
1786 1791
@@ -1788,9 +1793,9 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
1788 return false; 1793 return false;
1789 1794
1790 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1795 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1791 if (ring->irq_refcount++ == 0) { 1796 if (engine->irq_refcount++ == 0) {
1792 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1797 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1793 gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask); 1798 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
1794 } 1799 }
1795 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1800 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1796 1801
@@ -1798,24 +1803,24 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
1798} 1803}
1799 1804
1800static void 1805static void
1801hsw_vebox_put_irq(struct intel_engine_cs *ring) 1806hsw_vebox_put_irq(struct intel_engine_cs *engine)
1802{ 1807{
1803 struct drm_device *dev = ring->dev; 1808 struct drm_device *dev = engine->dev;
1804 struct drm_i915_private *dev_priv = dev->dev_private; 1809 struct drm_i915_private *dev_priv = dev->dev_private;
1805 unsigned long flags; 1810 unsigned long flags;
1806 1811
1807 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1812 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1808 if (--ring->irq_refcount == 0) { 1813 if (--engine->irq_refcount == 0) {
1809 I915_WRITE_IMR(ring, ~0); 1814 I915_WRITE_IMR(engine, ~0);
1810 gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask); 1815 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
1811 } 1816 }
1812 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1817 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1813} 1818}
1814 1819
1815static bool 1820static bool
1816gen8_ring_get_irq(struct intel_engine_cs *ring) 1821gen8_ring_get_irq(struct intel_engine_cs *engine)
1817{ 1822{
1818 struct drm_device *dev = ring->dev; 1823 struct drm_device *dev = engine->dev;
1819 struct drm_i915_private *dev_priv = dev->dev_private; 1824 struct drm_i915_private *dev_priv = dev->dev_private;
1820 unsigned long flags; 1825 unsigned long flags;
1821 1826
@@ -1823,15 +1828,15 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
1823 return false; 1828 return false;
1824 1829
1825 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1830 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1826 if (ring->irq_refcount++ == 0) { 1831 if (engine->irq_refcount++ == 0) {
1827 if (HAS_L3_DPF(dev) && ring->id == RCS) { 1832 if (HAS_L3_DPF(dev) && engine->id == RCS) {
1828 I915_WRITE_IMR(ring, 1833 I915_WRITE_IMR(engine,
1829 ~(ring->irq_enable_mask | 1834 ~(engine->irq_enable_mask |
1830 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1835 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1831 } else { 1836 } else {
1832 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1837 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1833 } 1838 }
1834 POSTING_READ(RING_IMR(ring->mmio_base)); 1839 POSTING_READ(RING_IMR(engine->mmio_base));
1835 } 1840 }
1836 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1841 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1837 1842
@@ -1839,21 +1844,21 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
1839} 1844}
1840 1845
1841static void 1846static void
1842gen8_ring_put_irq(struct intel_engine_cs *ring) 1847gen8_ring_put_irq(struct intel_engine_cs *engine)
1843{ 1848{
1844 struct drm_device *dev = ring->dev; 1849 struct drm_device *dev = engine->dev;
1845 struct drm_i915_private *dev_priv = dev->dev_private; 1850 struct drm_i915_private *dev_priv = dev->dev_private;
1846 unsigned long flags; 1851 unsigned long flags;
1847 1852
1848 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1853 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1849 if (--ring->irq_refcount == 0) { 1854 if (--engine->irq_refcount == 0) {
1850 if (HAS_L3_DPF(dev) && ring->id == RCS) { 1855 if (HAS_L3_DPF(dev) && engine->id == RCS) {
1851 I915_WRITE_IMR(ring, 1856 I915_WRITE_IMR(engine,
1852 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 1857 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1853 } else { 1858 } else {
1854 I915_WRITE_IMR(ring, ~0); 1859 I915_WRITE_IMR(engine, ~0);
1855 } 1860 }
1856 POSTING_READ(RING_IMR(ring->mmio_base)); 1861 POSTING_READ(RING_IMR(engine->mmio_base));
1857 } 1862 }
1858 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1863 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1859} 1864}
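
gen8 keeps the same refcount dance but programs the per-engine RING_IMR(engine->mmio_base) register and finishes with a POSTING_READ before dropping the lock. A two-line model of that idiom (the pointer is fake, for illustration only): writes to posted MMIO registers may linger in write buffers, and a read-back of the same register forces them out.

#include <stdint.h>

static void write_imr_and_flush(volatile uint32_t *ring_imr, uint32_t val)
{
	*ring_imr = val;	/* I915_WRITE_IMR(engine, val): posted write */
	(void)*ring_imr;	/* POSTING_READ(...): flush it to the device */
}
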
@@ -1967,40 +1972,40 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
1967 return 0; 1972 return 0;
1968} 1973}
1969 1974
1970static void cleanup_phys_status_page(struct intel_engine_cs *ring) 1975static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1971{ 1976{
1972 struct drm_i915_private *dev_priv = to_i915(ring->dev); 1977 struct drm_i915_private *dev_priv = to_i915(engine->dev);
1973 1978
1974 if (!dev_priv->status_page_dmah) 1979 if (!dev_priv->status_page_dmah)
1975 return; 1980 return;
1976 1981
1977 drm_pci_free(ring->dev, dev_priv->status_page_dmah); 1982 drm_pci_free(engine->dev, dev_priv->status_page_dmah);
1978 ring->status_page.page_addr = NULL; 1983 engine->status_page.page_addr = NULL;
1979} 1984}
1980 1985
1981static void cleanup_status_page(struct intel_engine_cs *ring) 1986static void cleanup_status_page(struct intel_engine_cs *engine)
1982{ 1987{
1983 struct drm_i915_gem_object *obj; 1988 struct drm_i915_gem_object *obj;
1984 1989
1985 obj = ring->status_page.obj; 1990 obj = engine->status_page.obj;
1986 if (obj == NULL) 1991 if (obj == NULL)
1987 return; 1992 return;
1988 1993
1989 kunmap(sg_page(obj->pages->sgl)); 1994 kunmap(sg_page(obj->pages->sgl));
1990 i915_gem_object_ggtt_unpin(obj); 1995 i915_gem_object_ggtt_unpin(obj);
1991 drm_gem_object_unreference(&obj->base); 1996 drm_gem_object_unreference(&obj->base);
1992 ring->status_page.obj = NULL; 1997 engine->status_page.obj = NULL;
1993} 1998}
1994 1999
1995static int init_status_page(struct intel_engine_cs *ring) 2000static int init_status_page(struct intel_engine_cs *engine)
1996{ 2001{
1997 struct drm_i915_gem_object *obj = ring->status_page.obj; 2002 struct drm_i915_gem_object *obj = engine->status_page.obj;
1998 2003
1999 if (obj == NULL) { 2004 if (obj == NULL) {
2000 unsigned flags; 2005 unsigned flags;
2001 int ret; 2006 int ret;
2002 2007
2003 obj = i915_gem_alloc_object(ring->dev, 4096); 2008 obj = i915_gem_alloc_object(engine->dev, 4096);
2004 if (obj == NULL) { 2009 if (obj == NULL) {
2005 DRM_ERROR("Failed to allocate status page\n"); 2010 DRM_ERROR("Failed to allocate status page\n");
2006 return -ENOMEM; 2011 return -ENOMEM;
@@ -2011,7 +2016,7 @@ static int init_status_page(struct intel_engine_cs *ring)
2011 goto err_unref; 2016 goto err_unref;
2012 2017
2013 flags = 0; 2018 flags = 0;
2014 if (!HAS_LLC(ring->dev)) 2019 if (!HAS_LLC(engine->dev))
2015 /* On g33, we cannot place HWS above 256MiB, so 2020 /* On g33, we cannot place HWS above 256MiB, so
2016 * restrict its pinning to the low mappable arena. 2021 * restrict its pinning to the low mappable arena.
2017 * Though this restriction is not documented for 2022 * Though this restriction is not documented for
@@ -2030,32 +2035,32 @@ err_unref:
2030 return ret; 2035 return ret;
2031 } 2036 }
2032 2037
2033 ring->status_page.obj = obj; 2038 engine->status_page.obj = obj;
2034 } 2039 }
2035 2040
2036 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); 2041 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
2037 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); 2042 engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
2038 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 2043 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
2039 2044
2040 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 2045 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
2041 ring->name, ring->status_page.gfx_addr); 2046 engine->name, engine->status_page.gfx_addr);
2042 2047
2043 return 0; 2048 return 0;
2044} 2049}
2045 2050
2046static int init_phys_status_page(struct intel_engine_cs *ring) 2051static int init_phys_status_page(struct intel_engine_cs *engine)
2047{ 2052{
2048 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2053 struct drm_i915_private *dev_priv = engine->dev->dev_private;
2049 2054
2050 if (!dev_priv->status_page_dmah) { 2055 if (!dev_priv->status_page_dmah) {
2051 dev_priv->status_page_dmah = 2056 dev_priv->status_page_dmah =
2052 drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); 2057 drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
2053 if (!dev_priv->status_page_dmah) 2058 if (!dev_priv->status_page_dmah)
2054 return -ENOMEM; 2059 return -ENOMEM;
2055 } 2060 }
2056 2061
2057 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 2062 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
2058 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 2063 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
2059 2064
2060 return 0; 2065 return 0;
2061} 2066}
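
Two status-page flavours are renamed here: the GEM-backed page (init_status_page(), pinned low for g33 and kmapped) and the physically addressed fallback used when I915_NEED_GFX_HWS() is false (init_phys_status_page(), one drm_pci_alloc'd page owned by the device). A toy model of the fallback's allocate-once behaviour, with stand-in types (toy_dmah, toy_engine) in place of drm_dma_handle_t and intel_engine_cs:

#include <stdlib.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096

struct toy_dmah { void *vaddr; };
struct toy_engine { void *status_page; };

static struct toy_dmah *status_page_dmah;	/* ~ dev_priv->status_page_dmah */

static int toy_init_phys_status_page(struct toy_engine *e)
{
	if (!status_page_dmah) {		/* allocated on first use only */
		struct toy_dmah *dmah = calloc(1, sizeof(*dmah));

		if (!dmah)
			return -1;		/* -ENOMEM in the driver */
		dmah->vaddr = aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);
		if (!dmah->vaddr) {
			free(dmah);
			return -1;
		}
		status_page_dmah = dmah;
	}

	e->status_page = status_page_dmah->vaddr;	/* reused afterwards */
	memset(e->status_page, 0, TOY_PAGE_SIZE);
	return 0;
}
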
@@ -2218,37 +2223,38 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
2218} 2223}
2219 2224
2220static int intel_init_ring_buffer(struct drm_device *dev, 2225static int intel_init_ring_buffer(struct drm_device *dev,
2221 struct intel_engine_cs *ring) 2226 struct intel_engine_cs *engine)
2222{ 2227{
2223 struct intel_ringbuffer *ringbuf; 2228 struct intel_ringbuffer *ringbuf;
2224 int ret; 2229 int ret;
2225 2230
2226 WARN_ON(ring->buffer); 2231 WARN_ON(engine->buffer);
2227 2232
2228 ring->dev = dev; 2233 engine->dev = dev;
2229 INIT_LIST_HEAD(&ring->active_list); 2234 INIT_LIST_HEAD(&engine->active_list);
2230 INIT_LIST_HEAD(&ring->request_list); 2235 INIT_LIST_HEAD(&engine->request_list);
2231 INIT_LIST_HEAD(&ring->execlist_queue); 2236 INIT_LIST_HEAD(&engine->execlist_queue);
2232 INIT_LIST_HEAD(&ring->buffers); 2237 INIT_LIST_HEAD(&engine->buffers);
2233 i915_gem_batch_pool_init(dev, &ring->batch_pool); 2238 i915_gem_batch_pool_init(dev, &engine->batch_pool);
2234 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); 2239 memset(engine->semaphore.sync_seqno, 0,
2240 sizeof(engine->semaphore.sync_seqno));
2235 2241
2236 init_waitqueue_head(&ring->irq_queue); 2242 init_waitqueue_head(&engine->irq_queue);
2237 2243
2238 ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE); 2244 ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
2239 if (IS_ERR(ringbuf)) { 2245 if (IS_ERR(ringbuf)) {
2240 ret = PTR_ERR(ringbuf); 2246 ret = PTR_ERR(ringbuf);
2241 goto error; 2247 goto error;
2242 } 2248 }
2243 ring->buffer = ringbuf; 2249 engine->buffer = ringbuf;
2244 2250
2245 if (I915_NEED_GFX_HWS(dev)) { 2251 if (I915_NEED_GFX_HWS(dev)) {
2246 ret = init_status_page(ring); 2252 ret = init_status_page(engine);
2247 if (ret) 2253 if (ret)
2248 goto error; 2254 goto error;
2249 } else { 2255 } else {
2250 WARN_ON(ring->id != RCS); 2256 WARN_ON(engine->id != RCS);
2251 ret = init_phys_status_page(ring); 2257 ret = init_phys_status_page(engine);
2252 if (ret) 2258 if (ret)
2253 goto error; 2259 goto error;
2254 } 2260 }
@@ -2256,58 +2262,58 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2256 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); 2262 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
2257 if (ret) { 2263 if (ret) {
2258 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", 2264 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
2259 ring->name, ret); 2265 engine->name, ret);
2260 intel_destroy_ringbuffer_obj(ringbuf); 2266 intel_destroy_ringbuffer_obj(ringbuf);
2261 goto error; 2267 goto error;
2262 } 2268 }
2263 2269
2264 ret = i915_cmd_parser_init_ring(ring); 2270 ret = i915_cmd_parser_init_ring(engine);
2265 if (ret) 2271 if (ret)
2266 goto error; 2272 goto error;
2267 2273
2268 return 0; 2274 return 0;
2269 2275
2270error: 2276error:
2271 intel_cleanup_ring_buffer(ring); 2277 intel_cleanup_ring_buffer(engine);
2272 return ret; 2278 return ret;
2273} 2279}
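
Worth noting in intel_init_ring_buffer(): every failure jumps to the single error: label, which calls the full intel_cleanup_ring_buffer(). That is only safe because the cleanup function checks each resource before tearing it down, so it tolerates any partially built state. The same idiom in miniature, with invented resources:

#include <stdlib.h>
#include <string.h>

struct thing { void *buf; void *status; };

static void thing_cleanup(struct thing *t)
{
	free(t->status);	/* free(NULL) is a no-op, so the teardown */
	t->status = NULL;	/* tolerates partially built state */
	free(t->buf);
	t->buf = NULL;
}

static int thing_init(struct thing *t)
{
	memset(t, 0, sizeof(*t));	/* cleanup must see sane fields */

	t->buf = malloc(64);
	if (!t->buf)
		goto error;
	t->status = malloc(64);
	if (!t->status)
		goto error;
	return 0;

error:
	thing_cleanup(t);		/* one label, full teardown */
	return -1;
}
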
2274 2280
2275void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) 2281void intel_cleanup_ring_buffer(struct intel_engine_cs *engine)
2276{ 2282{
2277 struct drm_i915_private *dev_priv; 2283 struct drm_i915_private *dev_priv;
2278 2284
2279 if (!intel_ring_initialized(ring)) 2285 if (!intel_ring_initialized(engine))
2280 return; 2286 return;
2281 2287
2282 dev_priv = to_i915(ring->dev); 2288 dev_priv = to_i915(engine->dev);
2283 2289
2284 if (ring->buffer) { 2290 if (engine->buffer) {
2285 intel_stop_ring_buffer(ring); 2291 intel_stop_ring_buffer(engine);
2286 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); 2292 WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
2287 2293
2288 intel_unpin_ringbuffer_obj(ring->buffer); 2294 intel_unpin_ringbuffer_obj(engine->buffer);
2289 intel_ringbuffer_free(ring->buffer); 2295 intel_ringbuffer_free(engine->buffer);
2290 ring->buffer = NULL; 2296 engine->buffer = NULL;
2291 } 2297 }
2292 2298
2293 if (ring->cleanup) 2299 if (engine->cleanup)
2294 ring->cleanup(ring); 2300 engine->cleanup(engine);
2295 2301
2296 if (I915_NEED_GFX_HWS(ring->dev)) { 2302 if (I915_NEED_GFX_HWS(engine->dev)) {
2297 cleanup_status_page(ring); 2303 cleanup_status_page(engine);
2298 } else { 2304 } else {
2299 WARN_ON(ring->id != RCS); 2305 WARN_ON(engine->id != RCS);
2300 cleanup_phys_status_page(ring); 2306 cleanup_phys_status_page(engine);
2301 } 2307 }
2302 2308
2303 i915_cmd_parser_fini_ring(ring); 2309 i915_cmd_parser_fini_ring(engine);
2304 i915_gem_batch_pool_fini(&ring->batch_pool); 2310 i915_gem_batch_pool_fini(&engine->batch_pool);
2305 ring->dev = NULL; 2311 engine->dev = NULL;
2306} 2312}
2307 2313
2308static int ring_wait_for_space(struct intel_engine_cs *ring, int n) 2314static int ring_wait_for_space(struct intel_engine_cs *engine, int n)
2309{ 2315{
2310 struct intel_ringbuffer *ringbuf = ring->buffer; 2316 struct intel_ringbuffer *ringbuf = engine->buffer;
2311 struct drm_i915_gem_request *request; 2317 struct drm_i915_gem_request *request;
2312 unsigned space; 2318 unsigned space;
2313 int ret; 2319 int ret;
@@ -2318,14 +2324,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
2318 /* The whole point of reserving space is to not wait! */ 2324 /* The whole point of reserving space is to not wait! */
2319 WARN_ON(ringbuf->reserved_in_use); 2325 WARN_ON(ringbuf->reserved_in_use);
2320 2326
2321 list_for_each_entry(request, &ring->request_list, list) { 2327 list_for_each_entry(request, &engine->request_list, list) {
2322 space = __intel_ring_space(request->postfix, ringbuf->tail, 2328 space = __intel_ring_space(request->postfix, ringbuf->tail,
2323 ringbuf->size); 2329 ringbuf->size);
2324 if (space >= n) 2330 if (space >= n)
2325 break; 2331 break;
2326 } 2332 }
2327 2333
2328 if (WARN_ON(&request->list == &ring->request_list)) 2334 if (WARN_ON(&request->list == &engine->request_list))
2329 return -ENOSPC; 2335 return -ENOSPC;
2330 2336
2331 ret = i915_wait_request(request); 2337 ret = i915_wait_request(request);
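
ring_wait_for_space() walks the request list oldest-first and waits on the first request whose retirement would free at least n bytes; the per-request figure comes from __intel_ring_space(). That helper (not shown in this hunk) is the usual power-of-two ring computation, minus a small gap the driver always keeps between tail and head. A sketch under that assumption, with RING_FREE_RESERVE standing in for the driver's constant:

#define RING_FREE_RESERVE 64	/* gap kept between tail and head */

static int ring_space(int head, int tail, int size)
{
	int space = head - tail;	/* bytes until we'd catch the head */

	if (space <= 0)
		space += size;		/* head is behind us: wrap around */
	return space - RING_FREE_RESERVE;
}
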
@@ -2350,22 +2356,22 @@ static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
2350 intel_ring_update_space(ringbuf); 2356 intel_ring_update_space(ringbuf);
2351} 2357}
2352 2358
2353int intel_ring_idle(struct intel_engine_cs *ring) 2359int intel_ring_idle(struct intel_engine_cs *engine)
2354{ 2360{
2355 struct drm_i915_gem_request *req; 2361 struct drm_i915_gem_request *req;
2356 2362
2357 /* Wait upon the last request to be completed */ 2363 /* Wait upon the last request to be completed */
2358 if (list_empty(&ring->request_list)) 2364 if (list_empty(&engine->request_list))
2359 return 0; 2365 return 0;
2360 2366
2361 req = list_entry(ring->request_list.prev, 2367 req = list_entry(engine->request_list.prev,
2362 struct drm_i915_gem_request, 2368 struct drm_i915_gem_request,
2363 list); 2369 list);
2364 2370
2365 /* Make sure we do not trigger any retires */ 2371 /* Make sure we do not trigger any retires */
2366 return __i915_wait_request(req, 2372 return __i915_wait_request(req,
2367 atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter), 2373 atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter),
2368 to_i915(ring->dev)->mm.interruptible, 2374 to_i915(engine->dev)->mm.interruptible,
2369 NULL, NULL); 2375 NULL, NULL);
2370} 2376}
2371 2377
@@ -2437,9 +2443,9 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
2437 ringbuf->reserved_in_use = false; 2443 ringbuf->reserved_in_use = false;
2438} 2444}
2439 2445
2440static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes) 2446static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
2441{ 2447{
2442 struct intel_ringbuffer *ringbuf = ring->buffer; 2448 struct intel_ringbuffer *ringbuf = engine->buffer;
2443 int remain_usable = ringbuf->effective_size - ringbuf->tail; 2449 int remain_usable = ringbuf->effective_size - ringbuf->tail;
2444 int remain_actual = ringbuf->size - ringbuf->tail; 2450 int remain_actual = ringbuf->size - ringbuf->tail;
2445 int ret, total_bytes, wait_bytes = 0; 2451 int ret, total_bytes, wait_bytes = 0;
@@ -2473,7 +2479,7 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
2473 } 2479 }
2474 2480
2475 if (wait_bytes) { 2481 if (wait_bytes) {
2476 ret = ring_wait_for_space(ring, wait_bytes); 2482 ret = ring_wait_for_space(engine, wait_bytes);
2477 if (unlikely(ret)) 2483 if (unlikely(ret))
2478 return ret; 2484 return ret;
2479 2485
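
For context, the part of __intel_ring_prepare() outside this hunk decides wait_bytes in three cases: if the request itself cannot fit before the end of the buffer, it must wrap immediately and the wasted tail remainder counts against the space to wait for; if only the reserved space spills past the end, no wrap is needed yet but the spill-over must be free; otherwise it just waits for the total. A simplified model of that decision (ignoring the reserved_in_use case; field names are abbreviated):

struct toy_ring { int size, effective_size, tail, space, reserved; };

static int bytes_to_wait_for(const struct toy_ring *r, int bytes, int *need_wrap)
{
	int remain_usable = r->effective_size - r->tail;
	int remain_actual = r->size - r->tail;
	int total = bytes + r->reserved;

	*need_wrap = 0;
	if (bytes > remain_usable) {
		*need_wrap = 1;			/* wrap now: the wasted tail */
		return remain_actual + total;	/* remainder counts too */
	}
	if (total > remain_usable)
		return remain_actual + r->reserved;	/* reserve spills over */
	if (total > r->space)
		return total;			/* plain wait, no wrapping */
	return 0;				/* enough room already */
}
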
@@ -2531,26 +2537,26 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
2531 return 0; 2537 return 0;
2532} 2538}
2533 2539
2534void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) 2540void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2535{ 2541{
2536 struct drm_device *dev = ring->dev; 2542 struct drm_device *dev = engine->dev;
2537 struct drm_i915_private *dev_priv = dev->dev_private; 2543 struct drm_i915_private *dev_priv = dev->dev_private;
2538 2544
2539 if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) { 2545 if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
2540 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 2546 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
2541 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); 2547 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
2542 if (HAS_VEBOX(dev)) 2548 if (HAS_VEBOX(dev))
2543 I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); 2549 I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
2544 } 2550 }
2545 2551
2546 ring->set_seqno(ring, seqno); 2552 engine->set_seqno(engine, seqno);
2547 ring->hangcheck.seqno = seqno; 2553 engine->hangcheck.seqno = seqno;
2548} 2554}
2549 2555
2550static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring, 2556static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
2551 u32 value) 2557 u32 value)
2552{ 2558{
2553 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2559 struct drm_i915_private *dev_priv = engine->dev->dev_private;
2554 2560
2555 /* Every tail move must follow the sequence below */ 2561 /* Every tail move must follow the sequence below */
2556 2562
@@ -2570,8 +2576,8 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
2570 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 2576 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2571 2577
2572 /* Now that the ring is fully powered up, update the tail */ 2578 /* Now that the ring is fully powered up, update the tail */
2573 I915_WRITE_TAIL(ring, value); 2579 I915_WRITE_TAIL(engine, value);
2574 POSTING_READ(RING_TAIL(ring->mmio_base)); 2580 POSTING_READ(RING_TAIL(engine->mmio_base));
2575 2581
2576 /* Let the ring send IDLE messages to the GT again, 2582 /* Let the ring send IDLE messages to the GT again,
2577 * and so let it sleep to conserve power when idle. 2583 * and so let it sleep to conserve power when idle.
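
The full sequence around this hunk: disable the ring's sleep messages (waking it), poll until it reports awake with a 50ms budget, write TAIL, flush with a posting read, then re-enable sleep messages. A toy model of that order of operations (struct and field names invented; the real code programs GEN6_BSD_SLEEP_PSMI_CONTROL):

#include <stdint.h>

struct toy_bsd { volatile uint32_t psmi_ctl, sleep_indicator, tail; };

static void toy_bsd_write_tail(struct toy_bsd *r, uint32_t value)
{
	int budget = 50;		/* the driver polls with a 50ms timeout */

	r->psmi_ctl |= 1u;		/* 1. forbid sleep messages: wake up */
	while (r->sleep_indicator && budget--)
		;			/* 2. wait until the ring is awake */

	r->tail = value;		/* 3. move TAIL ... */
	(void)r->tail;			/*    ... and flush with a posting read */

	r->psmi_ctl &= ~1u;		/* 4. allow IDLE messages again */
}
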
@@ -3157,17 +3163,17 @@ intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
3157} 3163}
3158 3164
3159void 3165void
3160intel_stop_ring_buffer(struct intel_engine_cs *ring) 3166intel_stop_ring_buffer(struct intel_engine_cs *engine)
3161{ 3167{
3162 int ret; 3168 int ret;
3163 3169
3164 if (!intel_ring_initialized(ring)) 3170 if (!intel_ring_initialized(engine))
3165 return; 3171 return;
3166 3172
3167 ret = intel_ring_idle(ring); 3173 ret = intel_ring_idle(engine);
3168 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error)) 3174 if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
3169 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 3175 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
3170 ring->name, ret); 3176 engine->name, ret);
3171 3177
3172 stop_ring(ring); 3178 stop_ring(engine);
3173} 3179}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 24efb57dcd7d..48484639c9da 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -355,19 +355,19 @@ struct intel_engine_cs {
355}; 355};
356 356
357static inline bool 357static inline bool
358intel_ring_initialized(struct intel_engine_cs *ring) 358intel_ring_initialized(struct intel_engine_cs *engine)
359{ 359{
360 return ring->dev != NULL; 360 return engine->dev != NULL;
361} 361}
362 362
363static inline unsigned 363static inline unsigned
364intel_ring_flag(struct intel_engine_cs *ring) 364intel_ring_flag(struct intel_engine_cs *engine)
365{ 365{
366 return 1 << ring->id; 366 return 1 << engine->id;
367} 367}
368 368
369static inline u32 369static inline u32
370intel_ring_sync_index(struct intel_engine_cs *ring, 370intel_ring_sync_index(struct intel_engine_cs *engine,
371 struct intel_engine_cs *other) 371 struct intel_engine_cs *other)
372{ 372{
373 int idx; 373 int idx;
@@ -380,7 +380,7 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
380 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs; 380 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
381 */ 381 */
382 382
383 idx = (other - ring) - 1; 383 idx = (other - engine) - 1;
384 if (idx < 0) 384 if (idx < 0)
385 idx += I915_NUM_RINGS; 385 idx += I915_NUM_RINGS;
386 386
@@ -388,26 +388,26 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
388} 388}
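
The arithmetic above deserves a worked example: the semaphore slot for "other" as seen from "engine" is the ring-to-ring distance minus one, modulo the ring count, since an engine never syncs against itself. With I915_NUM_RINGS == 5 and the array order rcs=0, vcs=1, bcs=2, vecs=3, vcs2=4, a self-contained sketch using plain indices instead of the driver's pointer arithmetic:

#include <assert.h>

#define NUM_RINGS 5

static int sync_index(int engine, int other)
{
	int idx = (other - engine) - 1;

	if (idx < 0)
		idx += NUM_RINGS;
	return idx;
}

int main(void)
{
	assert(sync_index(0, 1) == 0);	/* rcs:  slot 0 is vcs */
	assert(sync_index(0, 4) == 3);	/* rcs:  slot 3 is vcs2 */
	assert(sync_index(4, 0) == 0);	/* vcs2: slot 0 is rcs */
	assert(sync_index(4, 3) == 3);	/* vcs2: slot 3 is vecs */
	return 0;
}
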
389 389
390static inline void 390static inline void
391intel_flush_status_page(struct intel_engine_cs *ring, int reg) 391intel_flush_status_page(struct intel_engine_cs *engine, int reg)
392{ 392{
393 drm_clflush_virt_range(&ring->status_page.page_addr[reg], 393 drm_clflush_virt_range(&engine->status_page.page_addr[reg],
394 sizeof(uint32_t)); 394 sizeof(uint32_t));
395} 395}
396 396
397static inline u32 397static inline u32
398intel_read_status_page(struct intel_engine_cs *ring, 398intel_read_status_page(struct intel_engine_cs *engine,
399 int reg) 399 int reg)
400{ 400{
401 /* Ensure that the compiler doesn't optimize away the load. */ 401 /* Ensure that the compiler doesn't optimize away the load. */
402 barrier(); 402 barrier();
403 return ring->status_page.page_addr[reg]; 403 return engine->status_page.page_addr[reg];
404} 404}
405 405
406static inline void 406static inline void
407intel_write_status_page(struct intel_engine_cs *ring, 407intel_write_status_page(struct intel_engine_cs *engine,
408 int reg, u32 value) 408 int reg, u32 value)
409{ 409{
410 ring->status_page.page_addr[reg] = value; 410 engine->status_page.page_addr[reg] = value;
411} 411}
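
The barrier() in intel_read_status_page() exists because the GPU writes the status page behind the compiler's back: without it, a polling loop could be "optimized" into reading the value once. A minimal equivalent using a volatile access (illustrative, not the driver's implementation):

#include <stdint.h>

static inline uint32_t read_hw_status(const uint32_t *page, int reg)
{
	/* Force a fresh load each call; never reuse a cached value. */
	return *(const volatile uint32_t *)&page[reg];
}
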
412 412
413/* 413/*
@@ -438,42 +438,42 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
438void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); 438void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
439void intel_ringbuffer_free(struct intel_ringbuffer *ring); 439void intel_ringbuffer_free(struct intel_ringbuffer *ring);
440 440
441void intel_stop_ring_buffer(struct intel_engine_cs *ring); 441void intel_stop_ring_buffer(struct intel_engine_cs *engine);
442void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); 442void intel_cleanup_ring_buffer(struct intel_engine_cs *engine);
443 443
444int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request); 444int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
445 445
446int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n); 446int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
447int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req); 447int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
448static inline void intel_ring_emit(struct intel_engine_cs *ring, 448static inline void intel_ring_emit(struct intel_engine_cs *engine,
449 u32 data) 449 u32 data)
450{ 450{
451 struct intel_ringbuffer *ringbuf = ring->buffer; 451 struct intel_ringbuffer *ringbuf = engine->buffer;
452 iowrite32(data, ringbuf->virtual_start + ringbuf->tail); 452 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
453 ringbuf->tail += 4; 453 ringbuf->tail += 4;
454} 454}
455static inline void intel_ring_emit_reg(struct intel_engine_cs *ring, 455static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
456 i915_reg_t reg) 456 i915_reg_t reg)
457{ 457{
458 intel_ring_emit(ring, i915_mmio_reg_offset(reg)); 458 intel_ring_emit(engine, i915_mmio_reg_offset(reg));
459} 459}
460static inline void intel_ring_advance(struct intel_engine_cs *ring) 460static inline void intel_ring_advance(struct intel_engine_cs *engine)
461{ 461{
462 struct intel_ringbuffer *ringbuf = ring->buffer; 462 struct intel_ringbuffer *ringbuf = engine->buffer;
463 ringbuf->tail &= ringbuf->size - 1; 463 ringbuf->tail &= ringbuf->size - 1;
464} 464}
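
For a sense of how these three inlines compose: intel_ring_begin() reserves space, each intel_ring_emit() writes one dword at the byte tail and advances it by 4, and intel_ring_advance() masks the tail back into the power-of-two buffer. A hedged usage sketch (MI_NOOP and intel_ring_begin() are real driver names; the function below and the way it is handed the engine are made up):

static int emit_two_noops(struct drm_i915_gem_request *req,
			  struct intel_engine_cs *engine)
{
	int ret;

	ret = intel_ring_begin(req, 2);		/* room for two dwords */
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_NOOP);	/* tail += 4 */
	intel_ring_emit(engine, MI_NOOP);	/* tail += 4 */
	intel_ring_advance(engine);		/* tail &= size - 1 */
	return 0;
}
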
465int __intel_ring_space(int head, int tail, int size); 465int __intel_ring_space(int head, int tail, int size);
466void intel_ring_update_space(struct intel_ringbuffer *ringbuf); 466void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
467int intel_ring_space(struct intel_ringbuffer *ringbuf); 467int intel_ring_space(struct intel_ringbuffer *ringbuf);
468bool intel_ring_stopped(struct intel_engine_cs *ring); 468bool intel_ring_stopped(struct intel_engine_cs *engine);
469 469
470int __must_check intel_ring_idle(struct intel_engine_cs *ring); 470int __must_check intel_ring_idle(struct intel_engine_cs *engine);
471void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno); 471void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
472int intel_ring_flush_all_caches(struct drm_i915_gem_request *req); 472int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
473int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req); 473int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
474 474
475void intel_fini_pipe_control(struct intel_engine_cs *ring); 475void intel_fini_pipe_control(struct intel_engine_cs *engine);
476int intel_init_pipe_control(struct intel_engine_cs *ring); 476int intel_init_pipe_control(struct intel_engine_cs *engine);
477 477
478int intel_init_render_ring_buffer(struct drm_device *dev); 478int intel_init_render_ring_buffer(struct drm_device *dev);
479int intel_init_bsd_ring_buffer(struct drm_device *dev); 479int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -481,9 +481,9 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev);
481int intel_init_blt_ring_buffer(struct drm_device *dev); 481int intel_init_blt_ring_buffer(struct drm_device *dev);
482int intel_init_vebox_ring_buffer(struct drm_device *dev); 482int intel_init_vebox_ring_buffer(struct drm_device *dev);
483 483
484u64 intel_ring_get_active_head(struct intel_engine_cs *ring); 484u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
485 485
486int init_workarounds_ring(struct intel_engine_cs *ring); 486int init_workarounds_ring(struct intel_engine_cs *engine);
487 487
488static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) 488static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
489{ 489{