author     Dave Gordon <david.s.gordon@intel.com>      2016-03-23 14:19:53 -0400
committer  Tvrtko Ursulin <tvrtko.ursulin@intel.com>   2016-03-24 10:34:06 -0400
commit     c3232b1883e033e291aa3146f431b7ec87c80ec5 (patch)
tree       23ddb4979bfa6c0da7b9393f254ec303102046d8
parent     db18b6a64ca3fb260858279b218b84d5c179330f (diff)
drm/i915: introduce for_each_engine_id()
Equivalent to the existing for_each_engine() macro, this will replace the
latter wherever the third argument *is* actually wanted (in most places, it
is not used). The third argument is renamed to emphasise that it is an
engine id (type enum intel_engine_id).

All the callers of the macro that actually need the third argument are
updated to use this version, and the argument (generally 'i') is also
updated to be 'id'. Other callers (where the third argument is unused) are
untouched for now; they will be updated in the next patch.

Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
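At a typical call site the conversion looks like the following before/after
sketch (condensed from the i915_hangcheck_info() hunk in i915_debugfs.c
below; not a complete function):

    /* before: plain int loop index */
    int i;

    for_each_engine(engine, dev_priv, i)
            seqno[i] = engine->get_seqno(engine, false);

    /* after: typed engine id */
    enum intel_engine_id id;

    for_each_engine_id(engine, dev_priv, id)
            seqno[id] = engine->get_seqno(engine, false);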
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c         | 48
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  9
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c       |  6
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c  |  8
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c           |  6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     | 21
7 files changed, 62 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e0ba3e38000f..77dce527a1cb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -132,7 +132,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
 	int pin_count = 0;
-	int i;
+	enum intel_engine_id id;
 
 	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
 		   &obj->base,
@@ -143,9 +143,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain);
-	for_each_engine(engine, dev_priv, i)
+	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "%x ",
-			   i915_gem_request_get_seqno(obj->last_read_req[i]));
+			   i915_gem_request_get_seqno(obj->last_read_req[id]));
 	seq_printf(m, "] %x %x%s%s%s",
 		   i915_gem_request_get_seqno(obj->last_write_req),
 		   i915_gem_request_get_seqno(obj->last_fenced_req),
@@ -1334,7 +1334,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	u64 acthd[I915_NUM_ENGINES];
 	u32 seqno[I915_NUM_ENGINES];
 	u32 instdone[I915_NUM_INSTDONE_REG];
-	int i, j;
+	enum intel_engine_id id;
+	int j;
 
 	if (!i915.enable_hangcheck) {
 		seq_printf(m, "Hangcheck disabled\n");
@@ -1343,9 +1344,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_engine(engine, dev_priv, i) {
-		seqno[i] = engine->get_seqno(engine, false);
-		acthd[i] = intel_ring_get_active_head(engine);
+	for_each_engine_id(engine, dev_priv, id) {
+		seqno[id] = engine->get_seqno(engine, false);
+		acthd[id] = intel_ring_get_active_head(engine);
 	}
 
 	i915_get_extra_instdone(dev, instdone);
@@ -1359,13 +1360,13 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	} else
 		seq_printf(m, "Hangcheck inactive\n");
 
-	for_each_engine(engine, dev_priv, i) {
+	for_each_engine_id(engine, dev_priv, id) {
 		seq_printf(m, "%s:\n", engine->name);
 		seq_printf(m, "\tseqno = %x [current %x]\n",
-			   engine->hangcheck.seqno, seqno[i]);
+			   engine->hangcheck.seqno, seqno[id]);
 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
 			   (long long)engine->hangcheck.acthd,
-			   (long long)acthd[i]);
+			   (long long)acthd[id]);
 		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
 		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
 
@@ -1947,7 +1948,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
-	int ret, i;
+	enum intel_engine_id id;
+	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -1965,11 +1967,11 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 		if (i915.enable_execlists) {
 			seq_putc(m, '\n');
-			for_each_engine(engine, dev_priv, i) {
+			for_each_engine_id(engine, dev_priv, id) {
 				struct drm_i915_gem_object *ctx_obj =
-						ctx->engine[i].state;
+						ctx->engine[id].state;
 				struct intel_ringbuffer *ringbuf =
-						ctx->engine[i].ringbuf;
+						ctx->engine[id].ringbuf;
 
 				seq_printf(m, "%s: ", engine->name);
 				if (ctx_obj)
@@ -3134,7 +3136,8 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
-	int i, j, ret;
+	enum intel_engine_id id;
+	int j, ret;
 
 	if (!i915_semaphore_is_enabled(dev)) {
 		seq_puts(m, "Semaphores are disabled\n");
@@ -3153,14 +3156,14 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
 
 		seqno = (uint64_t *)kmap_atomic(page);
-		for_each_engine(engine, dev_priv, i) {
+		for_each_engine_id(engine, dev_priv, id) {
 			uint64_t offset;
 
 			seq_printf(m, "%s\n", engine->name);
 
 			seq_puts(m, " Last signal:");
 			for (j = 0; j < num_rings; j++) {
-				offset = i * I915_NUM_ENGINES + j;
+				offset = id * I915_NUM_ENGINES + j;
 				seq_printf(m, "0x%08llx (0x%02llx) ",
 					   seqno[offset], offset * 8);
 			}
@@ -3168,7 +3171,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 
 			seq_puts(m, " Last wait: ");
 			for (j = 0; j < num_rings; j++) {
-				offset = i + (j * I915_NUM_ENGINES);
+				offset = id + (j * I915_NUM_ENGINES);
 				seq_printf(m, "0x%08llx (0x%02llx) ",
 					   seqno[offset], offset * 8);
 			}
@@ -3178,7 +3181,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		kunmap_atomic(seqno);
 	} else {
 		seq_puts(m, " Last signal:");
-		for_each_engine(engine, dev_priv, i)
+		for_each_engine(engine, dev_priv, id)
 			for (j = 0; j < num_rings; j++)
 				seq_printf(m, "0x%08x\n",
 					   I915_READ(engine->semaphore.mbox.signal[j]));
@@ -3186,7 +3189,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 	}
 
 	seq_puts(m, "\nSync seqno:\n");
-	for_each_engine(engine, dev_priv, i) {
+	for_each_engine(engine, dev_priv, id) {
 		for (j = 0; j < num_rings; j++) {
 			seq_printf(m, " 0x%08x ",
 				   engine->semaphore.sync_seqno[j]);
@@ -3236,6 +3239,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
+	enum intel_engine_id id;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -3244,9 +3248,9 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-	for_each_engine(engine, dev_priv, i)
+	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "HW whitelist count for %s: %d\n",
-			   engine->name, workarounds->hw_whitelist_count[i]);
+			   engine->name, workarounds->hw_whitelist_count[id]);
 	for (i = 0; i < workarounds->count; ++i) {
 		i915_reg_t addr;
 		u32 mask, value, read;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 08b88c043431..8fe0592341b8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1995,6 +1995,15 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 	for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
 		for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_engine_initialized((ring__))))
 
+/* Iterator with engine_id */
+#define for_each_engine_id(engine__, dev_priv__, id__) \
+	for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
+	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+	     (engine__)++) \
+		for_each_if (((id__) = (engine__)->id, \
+			      intel_engine_initialized(engine__)))
+
+/* Iterator over subset of engines selected by mask */
 #define for_each_engine_masked(engine__, dev_priv__, mask__) \
 	for ((engine__) = &dev_priv->engine[0]; (engine__) < &dev_priv->engine[I915_NUM_ENGINES]; (engine__)++) \
 		for_each_if (intel_engine_flag((engine__)) & (mask__) && intel_engine_initialized((engine__)))
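The new iterator publishes the engine id through the comma expression inside
for_each_if(), so the body still runs exactly once per initialized engine and
'id' always matches the engine being visited. The following standalone
userspace sketch models that behaviour; the struct layout, engine names and
the for_each_if() helper here are simplified stand-ins for illustration, not
the real i915 definitions:

    #include <stdbool.h>
    #include <stdio.h>

    enum intel_engine_id { RCS = 0, BCS, VCS, VCS2, VECS, NUM_ENGINES };

    struct intel_engine_cs {
            enum intel_engine_id id;
            const char *name;
            bool initialized;
    };

    struct drm_i915_private {
            struct intel_engine_cs engine[NUM_ENGINES];
    };

    /* Skip an entry without terminating the outer loop. */
    #define for_each_if(condition) if (!(condition)) {} else

    #define for_each_engine_id(engine__, dev_priv__, id__) \
            for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
                 (engine__) < &(dev_priv__)->engine[NUM_ENGINES]; \
                 (engine__)++) \
                    for_each_if (((id__) = (engine__)->id, \
                                  (engine__)->initialized))

    int main(void)
    {
            struct drm_i915_private dev_priv = {
                    .engine = {
                            [RCS]  = { RCS,  "rcs",  true  },
                            [BCS]  = { BCS,  "bcs",  true  },
                            [VCS]  = { VCS,  "vcs",  false }, /* skipped */
                            [VCS2] = { VCS2, "vcs2", true  },
                            [VECS] = { VECS, "vecs", true  },
                    },
            };
            struct intel_engine_cs *engine;
            enum intel_engine_id id;

            for_each_engine_id(engine, &dev_priv, id)
                    printf("engine %d: %s\n", id, engine->name);
            return 0;
    }

Compiled as plain C, this prints one line per engine marked initialized and
silently skips the rest, mirroring how the driver macro behaves when an
engine is absent on a given platform.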
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 1f8ff06eed6b..54c208665b0d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -846,7 +846,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 					struct drm_i915_error_ring *ering)
 {
 	struct intel_engine_cs *to;
-	int i;
+	enum intel_engine_id id;
 
 	if (!i915_semaphore_is_enabled(dev_priv->dev))
 		return;
@@ -856,7 +856,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 		i915_error_ggtt_object_create(dev_priv,
 					      dev_priv->semaphore_obj);
 
-	for_each_engine(to, dev_priv, i) {
+	for_each_engine_id(to, dev_priv, id) {
 		int idx;
 		u16 signal_offset;
 		u32 *tmp;
@@ -864,7 +864,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 		if (engine == to)
 			continue;
 
-		signal_offset = (GEN8_SIGNAL_OFFSET(engine, i) & (PAGE_SIZE - 1))
+		signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
 				/ 4;
 		tmp = error->semaphore_obj->pages[0];
 		idx = intel_ring_sync_index(engine, to);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index ae1f58d073f2..0611bdc13d2d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -381,7 +381,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	struct intel_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
-	int i;
+	enum intel_engine_id id;
 
 	memset(&desc, 0, sizeof(desc));
 
@@ -390,7 +390,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
-	for_each_engine(engine, dev_priv, i) {
+	for_each_engine_id(engine, dev_priv, id) {
 		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
 		struct drm_i915_gem_object *obj;
 		uint64_t ctx_desc;
@@ -402,7 +402,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		 * for now who owns a GuC client. But for future owner of GuC
 		 * client, need to make sure lrc is pinned prior to enter here.
 		 */
-		obj = ctx->engine[i].state;
+		obj = ctx->engine[id].state;
 		if (!obj)
 			break;	/* XXX: continue? */
 
@@ -415,7 +415,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
 				  (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-		obj = ctx->engine[i].ringbuf->obj;
+		obj = ctx->engine[id].ringbuf->obj;
 
 		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
 		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a55a7cc317f8..14a23b346631 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3073,7 +3073,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 			     gpu_error.hangcheck_work.work);
 	struct drm_device *dev = dev_priv->dev;
 	struct intel_engine_cs *engine;
-	int i;
+	enum intel_engine_id id;
 	int busy_count = 0, rings_hung = 0;
 	bool stuck[I915_NUM_ENGINES] = { 0 };
 #define BUSY 1
@@ -3097,7 +3097,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	 */
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
-	for_each_engine(engine, dev_priv, i) {
+	for_each_engine_id(engine, dev_priv, id) {
 		u64 acthd;
 		u32 seqno;
 		bool busy = true;
@@ -3157,7 +3157,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 				break;
 			case HANGCHECK_HUNG:
 				engine->hangcheck.score += HUNG;
-				stuck[i] = true;
+				stuck[id] = true;
 				break;
 			}
 		}
@@ -3184,10 +3184,10 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		busy_count += busy;
 	}
 
-	for_each_engine(engine, dev_priv, i) {
+	for_each_engine_id(engine, dev_priv, id) {
 		if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
 			DRM_INFO("%s on %s\n",
-				 stuck[i] ? "stuck" : "no progress",
+				 stuck[id] ? "stuck" : "no progress",
 				 engine->name);
 			rings_hung |= intel_engine_flag(engine);
 		}
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 3c725dde16ed..7c7ac0aa192a 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -325,11 +325,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 	if (get_mocs_settings(req->engine->dev, &t)) {
 		struct drm_i915_private *dev_priv = req->i915;
 		struct intel_engine_cs *engine;
-		enum intel_engine_id ring_id;
+		enum intel_engine_id id;
 
 		/* Program the control registers */
-		for_each_engine(engine, dev_priv, ring_id) {
-			ret = emit_mocs_control_table(req, &t, ring_id);
+		for_each_engine_id(engine, dev_priv, id) {
+			ret = emit_mocs_control_table(req, &t, id);
 			if (ret)
 				return ret;
 		}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ce59850f7e73..a492bcabd30d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1280,7 +1280,8 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;
-	int i, ret, num_rings;
+	enum intel_engine_id id;
+	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
@@ -1290,9 +1291,9 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 	if (ret)
 		return ret;
 
-	for_each_engine(waiter, dev_priv, i) {
+	for_each_engine_id(waiter, dev_priv, id) {
 		u32 seqno;
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
@@ -1321,7 +1322,8 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;
-	int i, ret, num_rings;
+	enum intel_engine_id id;
+	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
@@ -1331,9 +1333,9 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 	if (ret)
 		return ret;
 
-	for_each_engine(waiter, dev_priv, i) {
+	for_each_engine_id(waiter, dev_priv, id) {
 		u32 seqno;
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
@@ -1359,7 +1361,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *useless;
-	int i, ret, num_rings;
+	enum intel_engine_id id;
+	int ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
 	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
@@ -1370,8 +1373,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 	if (ret)
 		return ret;
 
-	for_each_engine(useless, dev_priv, i) {
-		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
+	for_each_engine_id(useless, dev_priv, id) {
+		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
 
 		if (i915_mmio_reg_valid(mbox_reg)) {
 			u32 seqno = i915_gem_request_get_seqno(signaller_req);