author     Tvrtko Ursulin <tvrtko.ursulin@intel.com>	2016-03-16 07:00:38 -0400
committer  Tvrtko Ursulin <tvrtko.ursulin@intel.com>	2016-03-16 11:33:17 -0400
commit     4a570db57c051644093c20eea934ee02b6ea84fd
tree       738778f82dc78524e903619ed44729f21ab32492
parent     0bc40be85f33ca1795253a5f8674efb430f83cce
drm/i915: Rename intel_engine_cs struct members
Done by the Coccinelle script below and a couple of manual fixups.
@@
identifier I, J;
@@
struct I {
...
- struct intel_engine_cs *J;
+ struct intel_engine_cs *engine;
...
}
@@
identifier I, J;
@@
struct I {
...
- struct intel_engine_cs J;
+ struct intel_engine_cs engine;
...
}
@@
struct drm_i915_private *d;
@@
(
- d->ring
+ d->engine
)
@@
struct i915_execbuffer_params *p;
@@
(
- p->ring
+ p->engine
)
@@
struct intel_ringbuffer *r;
@@
(
- r->ring
+ r->engine
)
@@
struct drm_i915_gem_request *req;
@@
(
- req->ring
+ req->engine
)
v2: Script missed the tracepoint code - fixed up by hand.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
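
[Editor's note: for illustration, a minimal stand-alone C sketch (stub types and a hypothetical helper, not code from the driver) of the transformation the semantic patch performs. The first rule renames any intel_engine_cs pointer member, and the req->ring rule rewrites every dereference through that member:]

	struct intel_engine_cs { int id; };

	struct drm_i915_gem_request {
		struct intel_engine_cs *engine;	/* was: struct intel_engine_cs *ring; */
	};

	/* Hypothetical helper; before the patch its body read "return req->ring->id;". */
	static int request_engine_id(struct drm_i915_gem_request *req)
	{
		return req->engine->id;
	}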
 drivers/gpu/drm/i915/i915_debugfs.c          |  2
 drivers/gpu/drm/i915/i915_dma.c              |  8
 drivers/gpu/drm/i915/i915_drv.h              | 18
 drivers/gpu/drm/i915/i915_gem.c              | 38
 drivers/gpu/drm/i915/i915_gem_context.c      | 24
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   | 24
 drivers/gpu/drm/i915/i915_gem_gtt.c          | 14
 drivers/gpu/drm/i915/i915_gem_render_state.c |  6
 drivers/gpu/drm/i915/i915_gpu_error.c        | 18
 drivers/gpu/drm/i915/i915_guc_submission.c   |  9
 drivers/gpu/drm/i915/i915_irq.c              | 40
 drivers/gpu/drm/i915/i915_trace.h            | 46
 drivers/gpu/drm/i915/intel_display.c         | 18
 drivers/gpu/drm/i915/intel_lrc.c             | 76
 drivers/gpu/drm/i915/intel_mocs.c            |  2
 drivers/gpu/drm/i915/intel_overlay.c         |  8
 drivers/gpu/drm/i915/intel_pm.c              |  2
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 74
 drivers/gpu/drm/i915/intel_ringbuffer.h      |  2
19 files changed, 215 insertions(+), 214 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 164e1432d41f..a71ffaaf380d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -984,7 +984,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	const u32 *hws;
 	int i;
 
-	engine = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
 	hws = engine->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4aa3db61a535..19f605b0cd6d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -87,16 +87,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = 1;
 		break;
 	case I915_PARAM_HAS_BSD:
-		value = intel_ring_initialized(&dev_priv->ring[VCS]);
+		value = intel_ring_initialized(&dev_priv->engine[VCS]);
 		break;
 	case I915_PARAM_HAS_BLT:
-		value = intel_ring_initialized(&dev_priv->ring[BCS]);
+		value = intel_ring_initialized(&dev_priv->engine[BCS]);
 		break;
 	case I915_PARAM_HAS_VEBOX:
-		value = intel_ring_initialized(&dev_priv->ring[VECS]);
+		value = intel_ring_initialized(&dev_priv->engine[VECS]);
 		break;
 	case I915_PARAM_HAS_BSD2:
-		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+		value = intel_ring_initialized(&dev_priv->engine[VCS2]);
 		break;
 	case I915_PARAM_HAS_RELAXED_FENCING:
 		value = 1;
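
[Editor's note: the getparam cases above are what userspace queries to discover engines. A hedged sketch of the corresponding userspace check, assuming libdrm's xf86drm.h/i915_drm.h headers and the standard DRM_IOCTL_I915_GETPARAM interface (not part of this patch):]

	#include <xf86drm.h>
	#include <i915_drm.h>

	/* Returns non-zero if the kernel reports a BSD (video) engine on fd. */
	static int has_bsd_engine(int fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_HAS_BSD,
			.value = &value,
		};

		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;	/* ioctl failed; assume the engine is absent */
		return value;
	}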
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8d87242ce601..0187a560aa51 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1652,7 +1652,7 @@ struct i915_execbuffer_params {
 	uint32_t dispatch_flags;
 	uint32_t args_batch_start_offset;
 	uint64_t batch_obj_vm_offset;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_gem_object *batch_obj;
 	struct intel_context *ctx;
 	struct drm_i915_gem_request *request;
@@ -1704,7 +1704,7 @@ struct drm_i915_private {
 	wait_queue_head_t gmbus_wait_queue;
 
 	struct pci_dev *bridge_dev;
-	struct intel_engine_cs ring[I915_NUM_RINGS];
+	struct intel_engine_cs engine[I915_NUM_RINGS];
 	struct drm_i915_gem_object *semaphore_obj;
 	uint32_t last_seqno, next_seqno;
 
@@ -1969,7 +1969,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 /* Iterate over initialised rings */
 #define for_each_ring(ring__, dev_priv__, i__) \
 	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
-		for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
+		for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_ring_initialized((ring__))))
 
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
@@ -2184,7 +2184,7 @@ struct drm_i915_gem_request {
 
 	/** On Which ring this request was generated */
 	struct drm_i915_private *i915;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 
 	/** GEM sequence number associated with the previous request,
 	 * when the HWS breadcrumb is equal to this the GPU is processing
@@ -2279,7 +2279,7 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
 static inline struct intel_engine_cs *
 i915_gem_request_get_ring(struct drm_i915_gem_request *req)
 {
-	return req ? req->ring : NULL;
+	return req ? req->engine : NULL;
 }
 
 static inline struct drm_i915_gem_request *
@@ -2293,7 +2293,7 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
 static inline void
 i915_gem_request_unreference(struct drm_i915_gem_request *req)
 {
-	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
 	kref_put(&req->ref, i915_gem_request_free);
 }
 
@@ -2305,7 +2305,7 @@ i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
 	if (!req)
 		return;
 
-	dev = req->ring->dev;
+	dev = req->engine->dev;
 	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
 		mutex_unlock(&dev->struct_mutex);
 }
@@ -2949,14 +2949,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
 					    bool lazy_coherency)
 {
-	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+	u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency);
 	return i915_seqno_passed(seqno, req->previous_seqno);
 }
 
 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 					      bool lazy_coherency)
 {
-	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+	u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency);
 	return i915_seqno_passed(seqno, req->seqno);
 }
 
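
[Editor's note: only the struct members are renamed here; helper names such as for_each_ring() and intel_ring_initialized() keep their old names (follow-up patches rename those). A usage sketch of the iterator after this change, with a made-up debug message, engine->name being a real member in this era of the driver:]

	/* Walk only the engines that were actually initialised. */
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i)
		DRM_DEBUG_DRIVER("engine %d (%s) is initialised\n",
				 i, engine->name);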
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1119b8f46f09..cd68a86437f1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1193,7 +1193,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 	 * takes to sleep on a request, on the order of a microsecond.
 	 */
 
-	if (req->ring->irq_refcount)
+	if (req->engine->irq_refcount)
 		return -EBUSY;
 
 	/* Only spin if we know the GPU is processing this request */
@@ -1381,7 +1381,7 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 	if (req->file_priv)
 		return -EINVAL;
 
-	dev_private = req->ring->dev->dev_private;
+	dev_private = req->engine->dev->dev_private;
 	file_priv = file->driver_priv;
 
 	spin_lock(&file_priv->mm.lock);
@@ -1434,7 +1434,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 static void
 __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *tmp;
 
 	lockdep_assert_held(&engine->dev->struct_mutex);
@@ -1466,7 +1466,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
 
 	BUG_ON(req == NULL);
 
-	dev = req->ring->dev;
+	dev = req->engine->dev;
 	dev_priv = dev->dev_private;
 	interruptible = dev_priv->mm.interruptible;
 
@@ -1505,7 +1505,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 		if (ret)
 			return ret;
 
-		i = obj->last_write_req->ring->id;
+		i = obj->last_write_req->engine->id;
 		if (obj->last_read_req[i] == obj->last_write_req)
 			i915_gem_object_retire__read(obj, i);
 		else
@@ -1532,7 +1532,7 @@ static void
 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
 			       struct drm_i915_gem_request *req)
 {
-	int ring = req->ring->id;
+	int ring = req->engine->id;
 
 	if (obj->last_read_req[ring] == req)
 		i915_gem_object_retire__read(obj, ring);
@@ -2423,7 +2423,7 @@ static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
 	RQ_BUG_ON(obj->last_write_req == NULL);
-	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->engine)));
 
 	i915_gem_request_assign(&obj->last_write_req, NULL);
 	intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2440,7 +2440,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	list_del_init(&obj->ring_list[ring]);
 	i915_gem_request_assign(&obj->last_read_req[ring], NULL);
 
-	if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+	if (obj->last_write_req && obj->last_write_req->engine->id == ring)
 		i915_gem_object_retire__write(obj);
 
 	obj->active &= ~(1 << ring);
@@ -2551,7 +2551,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	if (WARN_ON(request == NULL))
 		return;
 
-	engine = request->ring;
+	engine = request->engine;
 	dev_priv = engine->dev->dev_private;
 	ringbuf = request->ringbuf;
 
@@ -2680,7 +2680,7 @@ void i915_gem_request_free(struct kref *req_ref)
 
 	if (ctx) {
 		if (i915.enable_execlists && ctx != req->i915->kernel_context)
-			intel_lr_context_unpin(ctx, req->ring);
+			intel_lr_context_unpin(ctx, req->engine);
 
 		i915_gem_context_unreference(ctx);
 	}
@@ -2712,7 +2712,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 
 	kref_init(&req->ref);
 	req->i915 = dev_priv;
-	req->ring = engine;
+	req->engine = engine;
 	req->ctx = ctx;
 	i915_gem_context_reference(req->ctx);
 
@@ -4364,10 +4364,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 
 			req = obj->last_read_req[i];
 			if (req)
-				args->busy |= 1 << (16 + req->ring->exec_id);
+				args->busy |= 1 << (16 + req->engine->exec_id);
 		}
 		if (obj->last_write_req)
-			args->busy |= obj->last_write_req->ring->exec_id;
+			args->busy |= obj->last_write_req->engine->exec_id;
 	}
 
 unref:
@@ -4697,7 +4697,7 @@ err:
 
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
@@ -4814,13 +4814,13 @@ int i915_gem_init_rings(struct drm_device *dev)
 	return 0;
 
 cleanup_vebox_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
+	intel_cleanup_ring_buffer(&dev_priv->engine[VECS]);
 cleanup_blt_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+	intel_cleanup_ring_buffer(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+	intel_cleanup_ring_buffer(&dev_priv->engine[VCS]);
 cleanup_render_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+	intel_cleanup_ring_buffer(&dev_priv->engine[RCS]);
 
 	return ret;
 }
@@ -5056,7 +5056,7 @@ i915_gem_load_init(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
-		init_ring_lists(&dev_priv->ring[i]);
+		init_ring_lists(&dev_priv->engine[i]);
 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
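
[Editor's note: the busy-ioctl hunk above shows the bit layout reported to userspace: the last writer's exec_id sits in the low 16 bits of args->busy and each engine with an outstanding read sets bit (16 + exec_id). A small decoding sketch with hypothetical helper names:]

	/* Decode the args->busy word packed by i915_gem_busy_ioctl() above. */
	static unsigned int busy_write_engine(unsigned int busy)
	{
		return busy & 0xffff;			/* exec_id of the last writer */
	}

	static int busy_is_read_by(unsigned int busy, unsigned int exec_id)
	{
		return (busy >> (16 + exec_id)) & 1;	/* engine has a pending read */
	}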
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 44f582988094..6c325e4c7556 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -346,7 +346,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *engine = &dev_priv->ring[i];
+		struct intel_engine_cs *engine = &dev_priv->engine[i];
 
 		if (engine->last_context) {
 			i915_gem_context_unpin(engine->last_context, engine);
@@ -421,13 +421,13 @@ void i915_gem_context_fini(struct drm_device *dev)
 		 * to default context. So we need to unreference the base object once
 		 * to offset the do_switch part, so that i915_gem_context_unreference()
 		 * can then free the base object correctly. */
-		WARN_ON(!dev_priv->ring[RCS].last_context);
+		WARN_ON(!dev_priv->engine[RCS].last_context);
 
 		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
 	for (i = I915_NUM_RINGS; --i >= 0;) {
-		struct intel_engine_cs *engine = &dev_priv->ring[i];
+		struct intel_engine_cs *engine = &dev_priv->engine[i];
 
 		if (engine->last_context) {
 			i915_gem_context_unpin(engine->last_context, engine);
@@ -441,7 +441,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	if (i915.enable_execlists) {
@@ -510,7 +510,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -625,7 +625,7 @@ needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 	if (INTEL_INFO(engine->dev)->gen < 8)
 		return true;
 
-	if (engine != &dev_priv->ring[RCS])
+	if (engine != &dev_priv->engine[RCS])
 		return true;
 
 	return false;
@@ -643,7 +643,7 @@ needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
 	if (!IS_GEN8(engine->dev))
 		return false;
 
-	if (engine != &dev_priv->ring[RCS])
+	if (engine != &dev_priv->engine[RCS])
 		return false;
 
 	if (hw_flags & MI_RESTORE_INHIBIT)
@@ -655,14 +655,14 @@ needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
 static int do_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_context *from = engine->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
 	int ret, i;
 
-	if (from != NULL && engine == &dev_priv->ring[RCS]) {
+	if (from != NULL && engine == &dev_priv->engine[RCS]) {
 		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
 		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}
@@ -671,7 +671,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	if (engine == &dev_priv->ring[RCS]) {
+	if (engine == &dev_priv->engine[RCS]) {
 		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(engine->dev),
 					    0);
@@ -700,7 +700,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
 	}
 
-	if (engine != &dev_priv->ring[RCS]) {
+	if (engine != &dev_priv->engine[RCS]) {
 		if (from)
 			i915_gem_context_unreference(from);
 		goto done;
@@ -828,7 +828,7 @@ unpin_out:
  */
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	WARN_ON(i915.enable_execlists);
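
[Editor's note: a recurring pattern in the context hunks above is the comparison against &dev_priv->engine[RCS]: legacy (non-execlists) hardware context state exists only on the render engine, so do_switch() pins and restores rcs_state only there. A hedged sketch naming that test; the helper itself is hypothetical, not in the driver:]

	/* True only for the render engine, the sole engine with a legacy
	 * hardware context (rcs_state) to pin and restore. */
	static bool engine_is_rcs(struct drm_i915_private *dev_priv,
				  struct intel_engine_cs *engine)
	{
		return engine == &dev_priv->engine[RCS];
	}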
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f94d756828e8..bb1ed8c4bcb4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -942,7 +942,7 @@ static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(req->ring);
+	const unsigned other_rings = ~intel_ring_flag(req->engine);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -952,7 +952,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->ring, &req);
+			ret = i915_gem_object_sync(obj, req->engine, &req);
 			if (ret)
 				return ret;
 		}
@@ -964,7 +964,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(req->ring->dev);
+		i915_gem_chipset_flush(req->engine->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -1140,7 +1140,7 @@ void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
 	/* Unconditionally force add_request to emit a full flush. */
-	params->ring->gpu_caches_dirty = true;
+	params->engine->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
 	__i915_add_request(params->request, params->batch_obj, true);
@@ -1150,11 +1150,11 @@ static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
 			    struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;
 
-	if (!IS_GEN7(dev) || engine != &dev_priv->ring[RCS]) {
+	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -1233,7 +1233,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct list_head *vmas)
 {
 	struct drm_device *dev = params->dev;
-	struct intel_engine_cs *engine = params->ring;
+	struct intel_engine_cs *engine = params->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u64 exec_start, exec_len;
 	int instp_mode;
@@ -1257,7 +1257,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -1284,7 +1284,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	if (engine == &dev_priv->ring[RCS] &&
+	if (engine == &dev_priv->engine[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
@@ -1412,9 +1412,9 @@ eb_select_ring(struct drm_i915_private *dev_priv,
 			return -EINVAL;
 		}
 
-		*ring = &dev_priv->ring[_VCS(bsd_idx)];
+		*ring = &dev_priv->engine[_VCS(bsd_idx)];
 	} else {
-		*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+		*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
 	}
 
 	if (!intel_ring_initialized(*ring)) {
@@ -1632,7 +1632,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 */
 	params->dev = dev;
 	params->file = file;
-	params->ring = engine;
+	params->engine = engine;
 	params->dispatch_flags = dispatch_flags;
 	params->batch_obj = batch_obj;
 	params->ctx = ctx;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1bc77791bc96..ab54396029ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -658,7 +658,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	BUG_ON(entry >= 4);
@@ -1650,7 +1650,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1676,7 +1676,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
 
 	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
@@ -1687,7 +1687,7 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1720,7 +1720,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -2192,7 +2192,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 
 int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
 {
-	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = req->engine->dev->dev_private;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 	if (i915.enable_execlists)
@@ -2309,7 +2309,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
 				   fault_reg & ~RING_FAULT_VALID);
 		}
 	}
-	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+	POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
 }
 
 static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index b21f72ec895c..71611bf21fca 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -198,21 +198,21 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(req->ring, &so);
+	ret = i915_gem_render_state_prepare(req->engine, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+	ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
 					     so.rodata->batch_items * 4,
 					     I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
 	if (so.aux_batch_size > 8) {
-		ret = req->ring->dispatch_execbuffer(req,
+		ret = req->engine->dispatch_execbuffer(req,
 						     (so.ggtt_offset +
 						      so.aux_batch_offset),
 						     so.aux_batch_size,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 029ed4031edf..a73f7057e875 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -431,7 +431,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
 		obj = error->ring[i].batchbuffer;
 		if (obj) {
-			err_puts(m, dev_priv->ring[i].name);
+			err_puts(m, dev_priv->engine[i].name);
 			if (error->ring[i].pid != -1)
 				err_printf(m, " (submitted by %s [%d])",
 					   error->ring[i].comm,
@@ -445,14 +445,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		obj = error->ring[i].wa_batchbuffer;
 		if (obj) {
 			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   lower_32_bits(obj->gtt_offset));
 			print_error_obj(m, obj);
 		}
 
 		if (error->ring[i].num_requests) {
 			err_printf(m, "%s --- %d requests\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   error->ring[i].num_requests);
 			for (j = 0; j < error->ring[i].num_requests; j++) {
 				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
@@ -464,7 +464,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 		if ((obj = error->ring[i].ringbuffer)) {
 			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   lower_32_bits(obj->gtt_offset));
 			print_error_obj(m, obj);
 		}
@@ -478,7 +478,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
 			}
 			err_printf(m, "%s --- HW Status = 0x%08llx\n",
-				   dev_priv->ring[i].name, hws_offset);
+				   dev_priv->engine[i].name, hws_offset);
 			offset = 0;
 			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
 				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
@@ -495,12 +495,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		if (obj) {
 			u64 wa_ctx_offset = obj->gtt_offset;
 			u32 *wa_ctx_page = &obj->pages[0][0];
-			struct intel_engine_cs *engine = &dev_priv->ring[RCS];
+			struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
 					   engine->wa_ctx.per_ctx.size);
 
 			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
-				   dev_priv->ring[i].name, wa_ctx_offset);
+				   dev_priv->engine[i].name, wa_ctx_offset);
 			offset = 0;
 			for (elt = 0; elt < wa_ctx_size; elt += 4) {
 				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
@@ -515,7 +515,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 		if ((obj = error->ring[i].ctx)) {
 			err_printf(m, "%s --- HW Context = 0x%08x\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   lower_32_bits(obj->gtt_offset));
 			print_error_obj(m, obj);
 		}
@@ -1020,7 +1020,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 	int i, count;
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *engine = &dev_priv->ring[i];
+		struct intel_engine_cs *engine = &dev_priv->engine[i];
 		struct intel_ringbuffer *rbuf;
 
 		error->ring[i].pid = -1;
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 15a4beb387d4..ed4f0762b263 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -542,11 +542,12 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
 	wqi->header = WQ_TYPE_INORDER |
 			(wq_len << WQ_LEN_SHIFT) |
-			(rq->ring->guc_id << WQ_TARGET_SHIFT) |
+			(rq->engine->guc_id << WQ_TARGET_SHIFT) |
 			WQ_NO_WCFLUSH_WAIT;
 
 	/* The GuC wants only the low-order word of the context descriptor */
-	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
+	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
+							     rq->engine);
 
 	/* The GuC firmware wants the tail index in QWords, not bytes */
 	tail = rq->ringbuf->tail >> 3;
@@ -569,7 +570,7 @@ int i915_guc_submit(struct i915_guc_client *client,
 		    struct drm_i915_gem_request *rq)
 {
 	struct intel_guc *guc = client->guc;
-	unsigned int engine_id = rq->ring->guc_id;
+	unsigned int engine_id = rq->engine->guc_id;
 	int q_ret, b_ret;
 
 	q_ret = guc_add_workqueue_item(client, rq);
@@ -867,7 +868,7 @@ static void guc_create_ads(struct intel_guc *guc)
 	 * so its address won't change after we've told the GuC where
 	 * to find it.
 	 */
-	engine = &dev_priv->ring[RCS];
+	engine = &dev_priv->engine[RCS];
 	ads->golden_context_lrca = engine->status_page.gfx_addr;
 
 	for_each_ring(engine, dev_priv, i)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 64658961a7e5..6b7bee59e0c7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1291,9 +1291,9 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
 {
 	if (gt_iir &
 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-		notify_ring(&dev_priv->ring[RCS]);
+		notify_ring(&dev_priv->engine[RCS]);
 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-		notify_ring(&dev_priv->ring[VCS]);
+		notify_ring(&dev_priv->engine[VCS]);
 }
 
 static void snb_gt_irq_handler(struct drm_device *dev,
@@ -1303,11 +1303,11 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 
 	if (gt_iir &
 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-		notify_ring(&dev_priv->ring[RCS]);
+		notify_ring(&dev_priv->engine[RCS]);
 	if (gt_iir & GT_BSD_USER_INTERRUPT)
-		notify_ring(&dev_priv->ring[VCS]);
+		notify_ring(&dev_priv->engine[VCS]);
 	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(&dev_priv->ring[BCS]);
+		notify_ring(&dev_priv->engine[BCS]);
 
 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
 		      GT_BSD_CS_ERROR_INTERRUPT |
@@ -1338,11 +1338,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
 			I915_WRITE_FW(GEN8_GT_IIR(0), iir);
 			ret = IRQ_HANDLED;
 
-			gen8_cs_irq_handler(&dev_priv->ring[RCS],
+			gen8_cs_irq_handler(&dev_priv->engine[RCS],
 					    iir, GEN8_RCS_IRQ_SHIFT);
 
-			gen8_cs_irq_handler(&dev_priv->ring[BCS],
+			gen8_cs_irq_handler(&dev_priv->engine[BCS],
 					    iir, GEN8_BCS_IRQ_SHIFT);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
 	}
@@ -1353,11 +1353,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
 			I915_WRITE_FW(GEN8_GT_IIR(1), iir);
 			ret = IRQ_HANDLED;
 
-			gen8_cs_irq_handler(&dev_priv->ring[VCS],
+			gen8_cs_irq_handler(&dev_priv->engine[VCS],
 					    iir, GEN8_VCS1_IRQ_SHIFT);
 
-			gen8_cs_irq_handler(&dev_priv->ring[VCS2],
+			gen8_cs_irq_handler(&dev_priv->engine[VCS2],
 					    iir, GEN8_VCS2_IRQ_SHIFT);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
 	}
@@ -1368,8 +1368,8 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
 			I915_WRITE_FW(GEN8_GT_IIR(3), iir);
 			ret = IRQ_HANDLED;
 
-			gen8_cs_irq_handler(&dev_priv->ring[VECS],
+			gen8_cs_irq_handler(&dev_priv->engine[VECS],
 					    iir, GEN8_VECS_IRQ_SHIFT);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
 	}
@@ -1629,7 +1629,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
 	if (HAS_VEBOX(dev_priv->dev)) {
 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-			notify_ring(&dev_priv->ring[VECS]);
+			notify_ring(&dev_priv->engine[VECS]);
 
 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -4042,7 +4042,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 		new_iir = I915_READ16(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(&dev_priv->ring[RCS]);
+			notify_ring(&dev_priv->engine[RCS]);
 
 		for_each_pipe(dev_priv, pipe) {
 			int plane = pipe;
@@ -4238,7 +4238,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(&dev_priv->ring[RCS]);
+			notify_ring(&dev_priv->engine[RCS]);
 
 		for_each_pipe(dev_priv, pipe) {
 			int plane = pipe;
@@ -4468,9 +4468,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(&dev_priv->ring[RCS]);
+			notify_ring(&dev_priv->engine[RCS]);
 		if (iir & I915_BSD_USER_INTERRUPT)
-			notify_ring(&dev_priv->ring[VCS]);
+			notify_ring(&dev_priv->engine[VCS]);
 
 		for_each_pipe(dev_priv, pipe) {
 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index fa09e5581137..923cf6e4d8b6 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -464,7 +464,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, | |||
464 | TP_fast_assign( | 464 | TP_fast_assign( |
465 | __entry->dev = from->dev->primary->index; | 465 | __entry->dev = from->dev->primary->index; |
466 | __entry->sync_from = from->id; | 466 | __entry->sync_from = from->id; |
467 | __entry->sync_to = to_req->ring->id; | 467 | __entry->sync_to = to_req->engine->id; |
468 | __entry->seqno = i915_gem_request_get_seqno(req); | 468 | __entry->seqno = i915_gem_request_get_seqno(req); |
469 | ), | 469 | ), |
470 | 470 | ||
@@ -486,13 +486,13 @@ TRACE_EVENT(i915_gem_ring_dispatch, | |||
486 | ), | 486 | ), |
487 | 487 | ||
488 | TP_fast_assign( | 488 | TP_fast_assign( |
489 | struct intel_engine_cs *ring = | 489 | struct intel_engine_cs *engine = |
490 | i915_gem_request_get_ring(req); | 490 | i915_gem_request_get_ring(req); |
491 | __entry->dev = ring->dev->primary->index; | 491 | __entry->dev = engine->dev->primary->index; |
492 | __entry->ring = ring->id; | 492 | __entry->ring = engine->id; |
493 | __entry->seqno = i915_gem_request_get_seqno(req); | 493 | __entry->seqno = i915_gem_request_get_seqno(req); |
494 | __entry->flags = flags; | 494 | __entry->flags = flags; |
495 | i915_trace_irq_get(ring, req); | 495 | i915_trace_irq_get(engine, req); |
496 | ), | 496 | ), |
497 | 497 | ||
498 | TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", | 498 | TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", |
@@ -511,8 +511,8 @@ TRACE_EVENT(i915_gem_ring_flush, | |||
511 | ), | 511 | ), |
512 | 512 | ||
513 | TP_fast_assign( | 513 | TP_fast_assign( |
514 | __entry->dev = req->ring->dev->primary->index; | 514 | __entry->dev = req->engine->dev->primary->index; |
515 | __entry->ring = req->ring->id; | 515 | __entry->ring = req->engine->id; |
516 | __entry->invalidate = invalidate; | 516 | __entry->invalidate = invalidate; |
517 | __entry->flush = flush; | 517 | __entry->flush = flush; |
518 | ), | 518 | ), |
@@ -533,10 +533,10 @@ DECLARE_EVENT_CLASS(i915_gem_request, | |||
533 | ), | 533 | ), |
534 | 534 | ||
535 | TP_fast_assign( | 535 | TP_fast_assign( |
536 | struct intel_engine_cs *ring = | 536 | struct intel_engine_cs *engine = |
537 | i915_gem_request_get_ring(req); | 537 | i915_gem_request_get_ring(req); |
538 | __entry->dev = ring->dev->primary->index; | 538 | __entry->dev = engine->dev->primary->index; |
539 | __entry->ring = ring->id; | 539 | __entry->ring = engine->id; |
540 | __entry->seqno = i915_gem_request_get_seqno(req); | 540 | __entry->seqno = i915_gem_request_get_seqno(req); |
541 | ), | 541 | ), |
542 | 542 | ||
@@ -550,8 +550,8 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add, | |||
550 | ); | 550 | ); |
551 | 551 | ||
552 | TRACE_EVENT(i915_gem_request_notify, | 552 | TRACE_EVENT(i915_gem_request_notify, |
553 | TP_PROTO(struct intel_engine_cs *ring), | 553 | TP_PROTO(struct intel_engine_cs *engine), |
554 | TP_ARGS(ring), | 554 | TP_ARGS(engine), |
555 | 555 | ||
556 | TP_STRUCT__entry( | 556 | TP_STRUCT__entry( |
557 | __field(u32, dev) | 557 | __field(u32, dev) |
@@ -560,9 +560,9 @@ TRACE_EVENT(i915_gem_request_notify, | |||
560 | ), | 560 | ), |
561 | 561 | ||
562 | TP_fast_assign( | 562 | TP_fast_assign( |
563 | __entry->dev = ring->dev->primary->index; | 563 | __entry->dev = engine->dev->primary->index; |
564 | __entry->ring = ring->id; | 564 | __entry->ring = engine->id; |
565 | __entry->seqno = ring->get_seqno(ring, false); | 565 | __entry->seqno = engine->get_seqno(engine, false); |
566 | ), | 566 | ), |
567 | 567 | ||
568 | TP_printk("dev=%u, ring=%u, seqno=%u", | 568 | TP_printk("dev=%u, ring=%u, seqno=%u", |
@@ -597,13 +597,13 @@ TRACE_EVENT(i915_gem_request_wait_begin, | |||
597 | * less desirable. | 597 | * less desirable. |
598 | */ | 598 | */ |
599 | TP_fast_assign( | 599 | TP_fast_assign( |
600 | struct intel_engine_cs *ring = | 600 | struct intel_engine_cs *engine = |
601 | i915_gem_request_get_ring(req); | 601 | i915_gem_request_get_ring(req); |
602 | __entry->dev = ring->dev->primary->index; | 602 | __entry->dev = engine->dev->primary->index; |
603 | __entry->ring = ring->id; | 603 | __entry->ring = engine->id; |
604 | __entry->seqno = i915_gem_request_get_seqno(req); | 604 | __entry->seqno = i915_gem_request_get_seqno(req); |
605 | __entry->blocking = | 605 | __entry->blocking = |
606 | mutex_is_locked(&ring->dev->struct_mutex); | 606 | mutex_is_locked(&engine->dev->struct_mutex); |
607 | ), | 607 | ), |
608 | 608 | ||
609 | TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", | 609 | TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", |
@@ -777,9 +777,9 @@ DEFINE_EVENT(i915_context, i915_context_free, | |||
777 | * called only if full ppgtt is enabled. | 777 | * called only if full ppgtt is enabled. |
778 | */ | 778 | */ |
779 | TRACE_EVENT(switch_mm, | 779 | TRACE_EVENT(switch_mm, |
780 | TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to), | 780 | TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to), |
781 | 781 | ||
782 | TP_ARGS(ring, to), | 782 | TP_ARGS(engine, to), |
783 | 783 | ||
784 | TP_STRUCT__entry( | 784 | TP_STRUCT__entry( |
785 | __field(u32, ring) | 785 | __field(u32, ring) |
@@ -789,10 +789,10 @@ TRACE_EVENT(switch_mm, | |||
789 | ), | 789 | ), |
790 | 790 | ||
791 | TP_fast_assign( | 791 | TP_fast_assign( |
792 | __entry->ring = ring->id; | 792 | __entry->ring = engine->id; |
793 | __entry->to = to; | 793 | __entry->to = to; |
794 | __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; | 794 | __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; |
795 | __entry->dev = ring->dev->primary->index; | 795 | __entry->dev = engine->dev->primary->index; |
796 | ), | 796 | ), |
797 | 797 | ||
798 | TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", | 798 | TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", |
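Note on the i915_trace.h hunks: every TP_fast_assign body above follows the same shape, fetch the engine back-pointer from the request, then fill in dev/ring/seqno from it. Below is a minimal user-space model of that shape; the struct and field names are simplified stand-ins, not the real i915 or ftrace definitions. The key detail, visible in the unchanged __field(u32, ring) and TP_printk lines, is that the recorded field keeps the name "ring", so the rename stays kernel-internal and the trace ABI is untouched.

/*
 * Toy model (stand-in types, not kernel code): only the C-side
 * pointer is renamed; the reported field is still called "ring".
 */
#include <stdio.h>

struct intel_engine_cs { unsigned int id; unsigned int dev_index; };
struct drm_i915_gem_request { struct intel_engine_cs *engine; unsigned int seqno; };

struct trace_entry { unsigned int dev, ring, seqno; };

static void fast_assign(struct trace_entry *entry,
                        const struct drm_i915_gem_request *req)
{
        const struct intel_engine_cs *engine = req->engine; /* was req->ring */

        entry->dev = engine->dev_index;
        entry->ring = engine->id;       /* field name kept for userspace */
        entry->seqno = req->seqno;
}

int main(void)
{
        struct intel_engine_cs vcs = { .id = 1, .dev_index = 0 };
        struct drm_i915_gem_request req = { .engine = &vcs, .seqno = 42 };
        struct trace_entry e;

        fast_assign(&e, &req);
        printf("dev=%u, ring=%u, seqno=%u\n", e.dev, e.ring, e.seqno);
        return 0;
}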
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 317b55b0b596..f271b0f706e4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -10984,7 +10984,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, | |||
10984 | struct drm_i915_gem_request *req, | 10984 | struct drm_i915_gem_request *req, |
10985 | uint32_t flags) | 10985 | uint32_t flags) |
10986 | { | 10986 | { |
10987 | struct intel_engine_cs *engine = req->ring; | 10987 | struct intel_engine_cs *engine = req->engine; |
10988 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10988 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
10989 | u32 flip_mask; | 10989 | u32 flip_mask; |
10990 | int ret; | 10990 | int ret; |
@@ -11019,7 +11019,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, | |||
11019 | struct drm_i915_gem_request *req, | 11019 | struct drm_i915_gem_request *req, |
11020 | uint32_t flags) | 11020 | uint32_t flags) |
11021 | { | 11021 | { |
11022 | struct intel_engine_cs *engine = req->ring; | 11022 | struct intel_engine_cs *engine = req->engine; |
11023 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 11023 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
11024 | u32 flip_mask; | 11024 | u32 flip_mask; |
11025 | int ret; | 11025 | int ret; |
@@ -11051,7 +11051,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, | |||
11051 | struct drm_i915_gem_request *req, | 11051 | struct drm_i915_gem_request *req, |
11052 | uint32_t flags) | 11052 | uint32_t flags) |
11053 | { | 11053 | { |
11054 | struct intel_engine_cs *engine = req->ring; | 11054 | struct intel_engine_cs *engine = req->engine; |
11055 | struct drm_i915_private *dev_priv = dev->dev_private; | 11055 | struct drm_i915_private *dev_priv = dev->dev_private; |
11056 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 11056 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
11057 | uint32_t pf, pipesrc; | 11057 | uint32_t pf, pipesrc; |
@@ -11090,7 +11090,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, | |||
11090 | struct drm_i915_gem_request *req, | 11090 | struct drm_i915_gem_request *req, |
11091 | uint32_t flags) | 11091 | uint32_t flags) |
11092 | { | 11092 | { |
11093 | struct intel_engine_cs *engine = req->ring; | 11093 | struct intel_engine_cs *engine = req->engine; |
11094 | struct drm_i915_private *dev_priv = dev->dev_private; | 11094 | struct drm_i915_private *dev_priv = dev->dev_private; |
11095 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 11095 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
11096 | uint32_t pf, pipesrc; | 11096 | uint32_t pf, pipesrc; |
@@ -11126,7 +11126,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
11126 | struct drm_i915_gem_request *req, | 11126 | struct drm_i915_gem_request *req, |
11127 | uint32_t flags) | 11127 | uint32_t flags) |
11128 | { | 11128 | { |
11129 | struct intel_engine_cs *engine = req->ring; | 11129 | struct intel_engine_cs *engine = req->engine; |
11130 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 11130 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
11131 | uint32_t plane_bit = 0; | 11131 | uint32_t plane_bit = 0; |
11132 | int len, ret; | 11132 | int len, ret; |
@@ -11575,18 +11575,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
11575 | work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; | 11575 | work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; |
11576 | 11576 | ||
11577 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { | 11577 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
11578 | engine = &dev_priv->ring[BCS]; | 11578 | engine = &dev_priv->engine[BCS]; |
11579 | if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) | 11579 | if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) |
11580 | /* vlv: DISPLAY_FLIP fails to change tiling */ | 11580 | /* vlv: DISPLAY_FLIP fails to change tiling */ |
11581 | engine = NULL; | 11581 | engine = NULL; |
11582 | } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { | 11582 | } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { |
11583 | engine = &dev_priv->ring[BCS]; | 11583 | engine = &dev_priv->engine[BCS]; |
11584 | } else if (INTEL_INFO(dev)->gen >= 7) { | 11584 | } else if (INTEL_INFO(dev)->gen >= 7) { |
11585 | engine = i915_gem_request_get_ring(obj->last_write_req); | 11585 | engine = i915_gem_request_get_ring(obj->last_write_req); |
11586 | if (engine == NULL || engine->id != RCS) | 11586 | if (engine == NULL || engine->id != RCS) |
11587 | engine = &dev_priv->ring[BCS]; | 11587 | engine = &dev_priv->engine[BCS]; |
11588 | } else { | 11588 | } else { |
11589 | engine = &dev_priv->ring[RCS]; | 11589 | engine = &dev_priv->engine[RCS]; |
11590 | } | 11590 | } |
11591 | 11591 | ||
11592 | mmio_flip = use_mmio_flip(engine, obj); | 11592 | mmio_flip = use_mmio_flip(engine, obj); |
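The intel_crtc_page_flip hunk above is where the renamed engine array gets indexed by several platform branches in a row. Reduced to a plain function, with the platform checks turned into parameters and the VLV/CHV tiling-mismatch special case (which forces a NULL engine) dropped, the selection logic looks roughly like this. Everything here is a stand-in, not the real device-info API:

/* Sketch of flip-engine selection; types and checks are simplified. */
enum intel_ring_id { RCS = 0, VCS, BCS, VECS, VCS2, I915_NUM_RINGS };

struct intel_engine_cs { enum intel_ring_id id; };

struct drm_i915_private {
        struct intel_engine_cs engine[I915_NUM_RINGS]; /* was ->ring[] */
};

static struct intel_engine_cs *
pick_flip_engine(struct drm_i915_private *dev_priv, int gen,
                 int is_vlv_chv, int is_ivb_hsw,
                 struct intel_engine_cs *last_write)
{
        if (is_vlv_chv || is_ivb_hsw)
                return &dev_priv->engine[BCS];
        if (gen >= 7) {
                /* Stay on the engine that last wrote the object, but
                 * only if that was the render ring; otherwise fall
                 * back to the blitter. */
                if (last_write && last_write->id == RCS)
                        return last_write;
                return &dev_priv->engine[BCS];
        }
        return &dev_priv->engine[RCS];
}

int main(void)
{
        struct drm_i915_private i915 = { 0 };

        /* gen8, not VLV/CHV or IVB/HSW, no last writer: blitter. */
        return pick_flip_engine(&i915, 8, 0, 0, 0) == &i915.engine[BCS] ? 0 : 1;
}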
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 25514e91479a..bbcc31f4b15d 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -360,19 +360,19 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0, | |||
360 | struct drm_i915_gem_request *rq1) | 360 | struct drm_i915_gem_request *rq1) |
361 | { | 361 | { |
362 | 362 | ||
363 | struct intel_engine_cs *engine = rq0->ring; | 363 | struct intel_engine_cs *engine = rq0->engine; |
364 | struct drm_device *dev = engine->dev; | 364 | struct drm_device *dev = engine->dev; |
365 | struct drm_i915_private *dev_priv = dev->dev_private; | 365 | struct drm_i915_private *dev_priv = dev->dev_private; |
366 | uint64_t desc[2]; | 366 | uint64_t desc[2]; |
367 | 367 | ||
368 | if (rq1) { | 368 | if (rq1) { |
369 | desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring); | 369 | desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine); |
370 | rq1->elsp_submitted++; | 370 | rq1->elsp_submitted++; |
371 | } else { | 371 | } else { |
372 | desc[1] = 0; | 372 | desc[1] = 0; |
373 | } | 373 | } |
374 | 374 | ||
375 | desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring); | 375 | desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine); |
376 | rq0->elsp_submitted++; | 376 | rq0->elsp_submitted++; |
377 | 377 | ||
378 | /* You must always write both descriptors in the order below. */ | 378 | /* You must always write both descriptors in the order below. */ |
@@ -398,7 +398,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) | |||
398 | 398 | ||
399 | static void execlists_update_context(struct drm_i915_gem_request *rq) | 399 | static void execlists_update_context(struct drm_i915_gem_request *rq) |
400 | { | 400 | { |
401 | struct intel_engine_cs *engine = rq->ring; | 401 | struct intel_engine_cs *engine = rq->engine; |
402 | struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; | 402 | struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; |
403 | uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state; | 403 | uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state; |
404 | 404 | ||
@@ -611,7 +611,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *engine) | |||
611 | 611 | ||
612 | static void execlists_context_queue(struct drm_i915_gem_request *request) | 612 | static void execlists_context_queue(struct drm_i915_gem_request *request) |
613 | { | 613 | { |
614 | struct intel_engine_cs *engine = request->ring; | 614 | struct intel_engine_cs *engine = request->engine; |
615 | struct drm_i915_gem_request *cursor; | 615 | struct drm_i915_gem_request *cursor; |
616 | int num_elements = 0; | 616 | int num_elements = 0; |
617 | 617 | ||
@@ -650,7 +650,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request) | |||
650 | 650 | ||
651 | static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req) | 651 | static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req) |
652 | { | 652 | { |
653 | struct intel_engine_cs *engine = req->ring; | 653 | struct intel_engine_cs *engine = req->engine; |
654 | uint32_t flush_domains; | 654 | uint32_t flush_domains; |
655 | int ret; | 655 | int ret; |
656 | 656 | ||
@@ -669,7 +669,7 @@ static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req) | |||
669 | static int execlists_move_to_gpu(struct drm_i915_gem_request *req, | 669 | static int execlists_move_to_gpu(struct drm_i915_gem_request *req, |
670 | struct list_head *vmas) | 670 | struct list_head *vmas) |
671 | { | 671 | { |
672 | const unsigned other_rings = ~intel_ring_flag(req->ring); | 672 | const unsigned other_rings = ~intel_ring_flag(req->engine); |
673 | struct i915_vma *vma; | 673 | struct i915_vma *vma; |
674 | uint32_t flush_domains = 0; | 674 | uint32_t flush_domains = 0; |
675 | bool flush_chipset = false; | 675 | bool flush_chipset = false; |
@@ -679,7 +679,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req, | |||
679 | struct drm_i915_gem_object *obj = vma->obj; | 679 | struct drm_i915_gem_object *obj = vma->obj; |
680 | 680 | ||
681 | if (obj->active & other_rings) { | 681 | if (obj->active & other_rings) { |
682 | ret = i915_gem_object_sync(obj, req->ring, &req); | 682 | ret = i915_gem_object_sync(obj, req->engine, &req); |
683 | if (ret) | 683 | if (ret) |
684 | return ret; | 684 | return ret; |
685 | } | 685 | } |
@@ -703,7 +703,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request | |||
703 | { | 703 | { |
704 | int ret = 0; | 704 | int ret = 0; |
705 | 705 | ||
706 | request->ringbuf = request->ctx->engine[request->ring->id].ringbuf; | 706 | request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; |
707 | 707 | ||
708 | if (i915.enable_guc_submission) { | 708 | if (i915.enable_guc_submission) { |
709 | /* | 709 | /* |
@@ -719,7 +719,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request | |||
719 | } | 719 | } |
720 | 720 | ||
721 | if (request->ctx != request->i915->kernel_context) | 721 | if (request->ctx != request->i915->kernel_context) |
722 | ret = intel_lr_context_pin(request->ctx, request->ring); | 722 | ret = intel_lr_context_pin(request->ctx, request->engine); |
723 | 723 | ||
724 | return ret; | 724 | return ret; |
725 | } | 725 | } |
@@ -728,7 +728,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, | |||
728 | int bytes) | 728 | int bytes) |
729 | { | 729 | { |
730 | struct intel_ringbuffer *ringbuf = req->ringbuf; | 730 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
731 | struct intel_engine_cs *engine = req->ring; | 731 | struct intel_engine_cs *engine = req->engine; |
732 | struct drm_i915_gem_request *target; | 732 | struct drm_i915_gem_request *target; |
733 | unsigned space; | 733 | unsigned space; |
734 | int ret; | 734 | int ret; |
@@ -780,7 +780,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) | |||
780 | { | 780 | { |
781 | struct intel_ringbuffer *ringbuf = request->ringbuf; | 781 | struct intel_ringbuffer *ringbuf = request->ringbuf; |
782 | struct drm_i915_private *dev_priv = request->i915; | 782 | struct drm_i915_private *dev_priv = request->i915; |
783 | struct intel_engine_cs *engine = request->ring; | 783 | struct intel_engine_cs *engine = request->engine; |
784 | 784 | ||
785 | intel_logical_ring_advance(ringbuf); | 785 | intel_logical_ring_advance(ringbuf); |
786 | request->tail = ringbuf->tail; | 786 | request->tail = ringbuf->tail; |
@@ -897,7 +897,7 @@ int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) | |||
897 | int ret; | 897 | int ret; |
898 | 898 | ||
899 | WARN_ON(req == NULL); | 899 | WARN_ON(req == NULL); |
900 | dev_priv = req->ring->dev->dev_private; | 900 | dev_priv = req->engine->dev->dev_private; |
901 | 901 | ||
902 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, | 902 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, |
903 | dev_priv->mm.interruptible); | 903 | dev_priv->mm.interruptible); |
@@ -949,7 +949,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, | |||
949 | struct list_head *vmas) | 949 | struct list_head *vmas) |
950 | { | 950 | { |
951 | struct drm_device *dev = params->dev; | 951 | struct drm_device *dev = params->dev; |
952 | struct intel_engine_cs *engine = params->ring; | 952 | struct intel_engine_cs *engine = params->engine; |
953 | struct drm_i915_private *dev_priv = dev->dev_private; | 953 | struct drm_i915_private *dev_priv = dev->dev_private; |
954 | struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf; | 954 | struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf; |
955 | u64 exec_start; | 955 | u64 exec_start; |
@@ -963,7 +963,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, | |||
963 | case I915_EXEC_CONSTANTS_REL_GENERAL: | 963 | case I915_EXEC_CONSTANTS_REL_GENERAL: |
964 | case I915_EXEC_CONSTANTS_ABSOLUTE: | 964 | case I915_EXEC_CONSTANTS_ABSOLUTE: |
965 | case I915_EXEC_CONSTANTS_REL_SURFACE: | 965 | case I915_EXEC_CONSTANTS_REL_SURFACE: |
966 | if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) { | 966 | if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) { |
967 | DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); | 967 | DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); |
968 | return -EINVAL; | 968 | return -EINVAL; |
969 | } | 969 | } |
@@ -992,7 +992,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, | |||
992 | if (ret) | 992 | if (ret) |
993 | return ret; | 993 | return ret; |
994 | 994 | ||
995 | if (engine == &dev_priv->ring[RCS] && | 995 | if (engine == &dev_priv->engine[RCS] && |
996 | instp_mode != dev_priv->relative_constants_mode) { | 996 | instp_mode != dev_priv->relative_constants_mode) { |
997 | ret = intel_logical_ring_begin(params->request, 4); | 997 | ret = intel_logical_ring_begin(params->request, 4); |
998 | if (ret) | 998 | if (ret) |
@@ -1073,7 +1073,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine) | |||
1073 | 1073 | ||
1074 | int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) | 1074 | int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) |
1075 | { | 1075 | { |
1076 | struct intel_engine_cs *engine = req->ring; | 1076 | struct intel_engine_cs *engine = req->engine; |
1077 | int ret; | 1077 | int ret; |
1078 | 1078 | ||
1079 | if (!engine->gpu_caches_dirty) | 1079 | if (!engine->gpu_caches_dirty) |
@@ -1174,7 +1174,7 @@ void intel_lr_context_unpin(struct intel_context *ctx, | |||
1174 | static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) | 1174 | static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) |
1175 | { | 1175 | { |
1176 | int ret, i; | 1176 | int ret, i; |
1177 | struct intel_engine_cs *engine = req->ring; | 1177 | struct intel_engine_cs *engine = req->engine; |
1178 | struct intel_ringbuffer *ringbuf = req->ringbuf; | 1178 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
1179 | struct drm_device *dev = engine->dev; | 1179 | struct drm_device *dev = engine->dev; |
1180 | struct drm_i915_private *dev_priv = dev->dev_private; | 1180 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1647,7 +1647,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) | |||
1647 | static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) | 1647 | static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) |
1648 | { | 1648 | { |
1649 | struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; | 1649 | struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; |
1650 | struct intel_engine_cs *engine = req->ring; | 1650 | struct intel_engine_cs *engine = req->engine; |
1651 | struct intel_ringbuffer *ringbuf = req->ringbuf; | 1651 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
1652 | const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; | 1652 | const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; |
1653 | int i, ret; | 1653 | int i, ret; |
@@ -1688,7 +1688,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, | |||
1688 | * not idle). PML4 is allocated during ppgtt init so this is | 1688 | * not idle). PML4 is allocated during ppgtt init so this is |
1689 | * not needed in 48-bit.*/ | 1689 | * not needed in 48-bit.*/ |
1690 | if (req->ctx->ppgtt && | 1690 | if (req->ctx->ppgtt && |
1691 | (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) { | 1691 | (intel_ring_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { |
1692 | if (!USES_FULL_48BIT_PPGTT(req->i915) && | 1692 | if (!USES_FULL_48BIT_PPGTT(req->i915) && |
1693 | !intel_vgpu_active(req->i915->dev)) { | 1693 | !intel_vgpu_active(req->i915->dev)) { |
1694 | ret = intel_logical_ring_emit_pdps(req); | 1694 | ret = intel_logical_ring_emit_pdps(req); |
@@ -1696,7 +1696,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, | |||
1696 | return ret; | 1696 | return ret; |
1697 | } | 1697 | } |
1698 | 1698 | ||
1699 | req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring); | 1699 | req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->engine); |
1700 | } | 1700 | } |
1701 | 1701 | ||
1702 | ret = intel_logical_ring_begin(req, 4); | 1702 | ret = intel_logical_ring_begin(req, 4); |
@@ -1755,7 +1755,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, | |||
1755 | u32 unused) | 1755 | u32 unused) |
1756 | { | 1756 | { |
1757 | struct intel_ringbuffer *ringbuf = request->ringbuf; | 1757 | struct intel_ringbuffer *ringbuf = request->ringbuf; |
1758 | struct intel_engine_cs *engine = ringbuf->ring; | 1758 | struct intel_engine_cs *engine = ringbuf->engine; |
1759 | struct drm_device *dev = engine->dev; | 1759 | struct drm_device *dev = engine->dev; |
1760 | struct drm_i915_private *dev_priv = dev->dev_private; | 1760 | struct drm_i915_private *dev_priv = dev->dev_private; |
1761 | uint32_t cmd; | 1761 | uint32_t cmd; |
@@ -1776,7 +1776,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, | |||
1776 | 1776 | ||
1777 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) { | 1777 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) { |
1778 | cmd |= MI_INVALIDATE_TLB; | 1778 | cmd |= MI_INVALIDATE_TLB; |
1779 | if (engine == &dev_priv->ring[VCS]) | 1779 | if (engine == &dev_priv->engine[VCS]) |
1780 | cmd |= MI_INVALIDATE_BSD; | 1780 | cmd |= MI_INVALIDATE_BSD; |
1781 | } | 1781 | } |
1782 | 1782 | ||
@@ -1796,7 +1796,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, | |||
1796 | u32 flush_domains) | 1796 | u32 flush_domains) |
1797 | { | 1797 | { |
1798 | struct intel_ringbuffer *ringbuf = request->ringbuf; | 1798 | struct intel_ringbuffer *ringbuf = request->ringbuf; |
1799 | struct intel_engine_cs *engine = ringbuf->ring; | 1799 | struct intel_engine_cs *engine = ringbuf->engine; |
1800 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 1800 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
1801 | bool vf_flush_wa = false; | 1801 | bool vf_flush_wa = false; |
1802 | u32 flags = 0; | 1802 | u32 flags = 0; |
@@ -1919,7 +1919,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) | |||
1919 | intel_logical_ring_emit(ringbuf, | 1919 | intel_logical_ring_emit(ringbuf, |
1920 | (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); | 1920 | (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); |
1921 | intel_logical_ring_emit(ringbuf, | 1921 | intel_logical_ring_emit(ringbuf, |
1922 | hws_seqno_address(request->ring) | | 1922 | hws_seqno_address(request->engine) | |
1923 | MI_FLUSH_DW_USE_GTT); | 1923 | MI_FLUSH_DW_USE_GTT); |
1924 | intel_logical_ring_emit(ringbuf, 0); | 1924 | intel_logical_ring_emit(ringbuf, 0); |
1925 | intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); | 1925 | intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); |
@@ -1946,7 +1946,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) | |||
1946 | (PIPE_CONTROL_GLOBAL_GTT_IVB | | 1946 | (PIPE_CONTROL_GLOBAL_GTT_IVB | |
1947 | PIPE_CONTROL_CS_STALL | | 1947 | PIPE_CONTROL_CS_STALL | |
1948 | PIPE_CONTROL_QW_WRITE)); | 1948 | PIPE_CONTROL_QW_WRITE)); |
1949 | intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring)); | 1949 | intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); |
1950 | intel_logical_ring_emit(ringbuf, 0); | 1950 | intel_logical_ring_emit(ringbuf, 0); |
1951 | intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); | 1951 | intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); |
1952 | intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); | 1952 | intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); |
@@ -1958,19 +1958,19 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req) | |||
1958 | struct render_state so; | 1958 | struct render_state so; |
1959 | int ret; | 1959 | int ret; |
1960 | 1960 | ||
1961 | ret = i915_gem_render_state_prepare(req->ring, &so); | 1961 | ret = i915_gem_render_state_prepare(req->engine, &so); |
1962 | if (ret) | 1962 | if (ret) |
1963 | return ret; | 1963 | return ret; |
1964 | 1964 | ||
1965 | if (so.rodata == NULL) | 1965 | if (so.rodata == NULL) |
1966 | return 0; | 1966 | return 0; |
1967 | 1967 | ||
1968 | ret = req->ring->emit_bb_start(req, so.ggtt_offset, | 1968 | ret = req->engine->emit_bb_start(req, so.ggtt_offset, |
1969 | I915_DISPATCH_SECURE); | 1969 | I915_DISPATCH_SECURE); |
1970 | if (ret) | 1970 | if (ret) |
1971 | goto out; | 1971 | goto out; |
1972 | 1972 | ||
1973 | ret = req->ring->emit_bb_start(req, | 1973 | ret = req->engine->emit_bb_start(req, |
1974 | (so.ggtt_offset + so.aux_batch_offset), | 1974 | (so.ggtt_offset + so.aux_batch_offset), |
1975 | I915_DISPATCH_SECURE); | 1975 | I915_DISPATCH_SECURE); |
1976 | if (ret) | 1976 | if (ret) |
@@ -2117,7 +2117,7 @@ error: | |||
2117 | static int logical_render_ring_init(struct drm_device *dev) | 2117 | static int logical_render_ring_init(struct drm_device *dev) |
2118 | { | 2118 | { |
2119 | struct drm_i915_private *dev_priv = dev->dev_private; | 2119 | struct drm_i915_private *dev_priv = dev->dev_private; |
2120 | struct intel_engine_cs *engine = &dev_priv->ring[RCS]; | 2120 | struct intel_engine_cs *engine = &dev_priv->engine[RCS]; |
2121 | int ret; | 2121 | int ret; |
2122 | 2122 | ||
2123 | engine->name = "render ring"; | 2123 | engine->name = "render ring"; |
@@ -2170,7 +2170,7 @@ static int logical_render_ring_init(struct drm_device *dev) | |||
2170 | static int logical_bsd_ring_init(struct drm_device *dev) | 2170 | static int logical_bsd_ring_init(struct drm_device *dev) |
2171 | { | 2171 | { |
2172 | struct drm_i915_private *dev_priv = dev->dev_private; | 2172 | struct drm_i915_private *dev_priv = dev->dev_private; |
2173 | struct intel_engine_cs *engine = &dev_priv->ring[VCS]; | 2173 | struct intel_engine_cs *engine = &dev_priv->engine[VCS]; |
2174 | 2174 | ||
2175 | engine->name = "bsd ring"; | 2175 | engine->name = "bsd ring"; |
2176 | engine->id = VCS; | 2176 | engine->id = VCS; |
@@ -2187,7 +2187,7 @@ static int logical_bsd_ring_init(struct drm_device *dev) | |||
2187 | static int logical_bsd2_ring_init(struct drm_device *dev) | 2187 | static int logical_bsd2_ring_init(struct drm_device *dev) |
2188 | { | 2188 | { |
2189 | struct drm_i915_private *dev_priv = dev->dev_private; | 2189 | struct drm_i915_private *dev_priv = dev->dev_private; |
2190 | struct intel_engine_cs *engine = &dev_priv->ring[VCS2]; | 2190 | struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; |
2191 | 2191 | ||
2192 | engine->name = "bsd2 ring"; | 2192 | engine->name = "bsd2 ring"; |
2193 | engine->id = VCS2; | 2193 | engine->id = VCS2; |
@@ -2204,7 +2204,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev) | |||
2204 | static int logical_blt_ring_init(struct drm_device *dev) | 2204 | static int logical_blt_ring_init(struct drm_device *dev) |
2205 | { | 2205 | { |
2206 | struct drm_i915_private *dev_priv = dev->dev_private; | 2206 | struct drm_i915_private *dev_priv = dev->dev_private; |
2207 | struct intel_engine_cs *engine = &dev_priv->ring[BCS]; | 2207 | struct intel_engine_cs *engine = &dev_priv->engine[BCS]; |
2208 | 2208 | ||
2209 | engine->name = "blitter ring"; | 2209 | engine->name = "blitter ring"; |
2210 | engine->id = BCS; | 2210 | engine->id = BCS; |
@@ -2221,7 +2221,7 @@ static int logical_blt_ring_init(struct drm_device *dev) | |||
2221 | static int logical_vebox_ring_init(struct drm_device *dev) | 2221 | static int logical_vebox_ring_init(struct drm_device *dev) |
2222 | { | 2222 | { |
2223 | struct drm_i915_private *dev_priv = dev->dev_private; | 2223 | struct drm_i915_private *dev_priv = dev->dev_private; |
2224 | struct intel_engine_cs *engine = &dev_priv->ring[VECS]; | 2224 | struct intel_engine_cs *engine = &dev_priv->engine[VECS]; |
2225 | 2225 | ||
2226 | engine->name = "video enhancement ring"; | 2226 | engine->name = "video enhancement ring"; |
2227 | engine->id = VECS; | 2227 | engine->id = VECS; |
@@ -2281,13 +2281,13 @@ int intel_logical_rings_init(struct drm_device *dev) | |||
2281 | return 0; | 2281 | return 0; |
2282 | 2282 | ||
2283 | cleanup_vebox_ring: | 2283 | cleanup_vebox_ring: |
2284 | intel_logical_ring_cleanup(&dev_priv->ring[VECS]); | 2284 | intel_logical_ring_cleanup(&dev_priv->engine[VECS]); |
2285 | cleanup_blt_ring: | 2285 | cleanup_blt_ring: |
2286 | intel_logical_ring_cleanup(&dev_priv->ring[BCS]); | 2286 | intel_logical_ring_cleanup(&dev_priv->engine[BCS]); |
2287 | cleanup_bsd_ring: | 2287 | cleanup_bsd_ring: |
2288 | intel_logical_ring_cleanup(&dev_priv->ring[VCS]); | 2288 | intel_logical_ring_cleanup(&dev_priv->engine[VCS]); |
2289 | cleanup_render_ring: | 2289 | cleanup_render_ring: |
2290 | intel_logical_ring_cleanup(&dev_priv->ring[RCS]); | 2290 | intel_logical_ring_cleanup(&dev_priv->engine[RCS]); |
2291 | 2291 | ||
2292 | return ret; | 2292 | return ret; |
2293 | } | 2293 | } |
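Worth calling out from the intel_lrc.c hunks: execlists_elsp_write() submits two context descriptors per port write, and the comment in the hunk insists both are always written, second slot first (zero when there is no second request). A toy model of that ordering, with elsp_write() standing in for the real MMIO sequence, which actually writes each 64-bit descriptor as two dwords:

#include <stdint.h>
#include <stdio.h>

struct request { uint64_t ctx_desc; int elsp_submitted; };

static void elsp_write(uint64_t desc)
{
        printf("ELSP <- 0x%016llx\n", (unsigned long long)desc);
}

static void elsp_submit(struct request *rq0, struct request *rq1)
{
        uint64_t desc[2];

        if (rq1) {
                desc[1] = rq1->ctx_desc;
                rq1->elsp_submitted++;
        } else {
                desc[1] = 0;
        }

        desc[0] = rq0->ctx_desc;
        rq0->elsp_submitted++;

        /* Both slots must always be written, in this order. */
        elsp_write(desc[1]);
        elsp_write(desc[0]);
}

int main(void)
{
        struct request a = { .ctx_desc = 0x1234 };

        elsp_submit(&a, NULL);  /* single-request submit: slot 1 is 0 */
        return 0;
}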
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index d55925987ebf..2c895637ab50 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c | |||
@@ -322,7 +322,7 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req) | |||
322 | struct drm_i915_mocs_table t; | 322 | struct drm_i915_mocs_table t; |
323 | int ret; | 323 | int ret; |
324 | 324 | ||
325 | if (get_mocs_settings(req->ring->dev, &t)) { | 325 | if (get_mocs_settings(req->engine->dev, &t)) { |
326 | struct drm_i915_private *dev_priv = req->i915; | 326 | struct drm_i915_private *dev_priv = req->i915; |
327 | struct intel_engine_cs *engine; | 327 | struct intel_engine_cs *engine; |
328 | enum intel_ring_id ring_id; | 328 | enum intel_ring_id ring_id; |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 13b27632636e..13e22f52666c 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -233,7 +233,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
233 | { | 233 | { |
234 | struct drm_device *dev = overlay->dev; | 234 | struct drm_device *dev = overlay->dev; |
235 | struct drm_i915_private *dev_priv = dev->dev_private; | 235 | struct drm_i915_private *dev_priv = dev->dev_private; |
236 | struct intel_engine_cs *engine = &dev_priv->ring[RCS]; | 236 | struct intel_engine_cs *engine = &dev_priv->engine[RCS]; |
237 | struct drm_i915_gem_request *req; | 237 | struct drm_i915_gem_request *req; |
238 | int ret; | 238 | int ret; |
239 | 239 | ||
@@ -267,7 +267,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
267 | { | 267 | { |
268 | struct drm_device *dev = overlay->dev; | 268 | struct drm_device *dev = overlay->dev; |
269 | struct drm_i915_private *dev_priv = dev->dev_private; | 269 | struct drm_i915_private *dev_priv = dev->dev_private; |
270 | struct intel_engine_cs *engine = &dev_priv->ring[RCS]; | 270 | struct intel_engine_cs *engine = &dev_priv->engine[RCS]; |
271 | struct drm_i915_gem_request *req; | 271 | struct drm_i915_gem_request *req; |
272 | u32 flip_addr = overlay->flip_addr; | 272 | u32 flip_addr = overlay->flip_addr; |
273 | u32 tmp; | 273 | u32 tmp; |
@@ -336,7 +336,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
336 | { | 336 | { |
337 | struct drm_device *dev = overlay->dev; | 337 | struct drm_device *dev = overlay->dev; |
338 | struct drm_i915_private *dev_priv = dev->dev_private; | 338 | struct drm_i915_private *dev_priv = dev->dev_private; |
339 | struct intel_engine_cs *engine = &dev_priv->ring[RCS]; | 339 | struct intel_engine_cs *engine = &dev_priv->engine[RCS]; |
340 | struct drm_i915_gem_request *req; | 340 | struct drm_i915_gem_request *req; |
341 | u32 flip_addr = overlay->flip_addr; | 341 | u32 flip_addr = overlay->flip_addr; |
342 | int ret; | 342 | int ret; |
@@ -409,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) | |||
409 | { | 409 | { |
410 | struct drm_device *dev = overlay->dev; | 410 | struct drm_device *dev = overlay->dev; |
411 | struct drm_i915_private *dev_priv = dev->dev_private; | 411 | struct drm_i915_private *dev_priv = dev->dev_private; |
412 | struct intel_engine_cs *engine = &dev_priv->ring[RCS]; | 412 | struct intel_engine_cs *engine = &dev_priv->engine[RCS]; |
413 | int ret; | 413 | int ret; |
414 | 414 | ||
415 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 415 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c54a7df7c2c9..e51c28487696 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -7365,7 +7365,7 @@ static void __intel_rps_boost_work(struct work_struct *work) | |||
7365 | struct drm_i915_gem_request *req = boost->req; | 7365 | struct drm_i915_gem_request *req = boost->req; |
7366 | 7366 | ||
7367 | if (!i915_gem_request_completed(req, true)) | 7367 | if (!i915_gem_request_completed(req, true)) |
7368 | gen6_rps_boost(to_i915(req->ring->dev), NULL, | 7368 | gen6_rps_boost(to_i915(req->engine->dev), NULL, |
7369 | req->emitted_jiffies); | 7369 | req->emitted_jiffies); |
7370 | 7370 | ||
7371 | i915_gem_request_unreference__unlocked(req); | 7371 | i915_gem_request_unreference__unlocked(req); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 53237616ce19..7075b93a8fc9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -79,7 +79,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, | |||
79 | u32 invalidate_domains, | 79 | u32 invalidate_domains, |
80 | u32 flush_domains) | 80 | u32 flush_domains) |
81 | { | 81 | { |
82 | struct intel_engine_cs *engine = req->ring; | 82 | struct intel_engine_cs *engine = req->engine; |
83 | u32 cmd; | 83 | u32 cmd; |
84 | int ret; | 84 | int ret; |
85 | 85 | ||
@@ -106,7 +106,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, | |||
106 | u32 invalidate_domains, | 106 | u32 invalidate_domains, |
107 | u32 flush_domains) | 107 | u32 flush_domains) |
108 | { | 108 | { |
109 | struct intel_engine_cs *engine = req->ring; | 109 | struct intel_engine_cs *engine = req->engine; |
110 | struct drm_device *dev = engine->dev; | 110 | struct drm_device *dev = engine->dev; |
111 | u32 cmd; | 111 | u32 cmd; |
112 | int ret; | 112 | int ret; |
@@ -200,7 +200,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, | |||
200 | static int | 200 | static int |
201 | intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) | 201 | intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) |
202 | { | 202 | { |
203 | struct intel_engine_cs *engine = req->ring; | 203 | struct intel_engine_cs *engine = req->engine; |
204 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 204 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
205 | int ret; | 205 | int ret; |
206 | 206 | ||
@@ -236,7 +236,7 @@ static int | |||
236 | gen6_render_ring_flush(struct drm_i915_gem_request *req, | 236 | gen6_render_ring_flush(struct drm_i915_gem_request *req, |
237 | u32 invalidate_domains, u32 flush_domains) | 237 | u32 invalidate_domains, u32 flush_domains) |
238 | { | 238 | { |
239 | struct intel_engine_cs *engine = req->ring; | 239 | struct intel_engine_cs *engine = req->engine; |
240 | u32 flags = 0; | 240 | u32 flags = 0; |
241 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 241 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
242 | int ret; | 242 | int ret; |
@@ -288,7 +288,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, | |||
288 | static int | 288 | static int |
289 | gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) | 289 | gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) |
290 | { | 290 | { |
291 | struct intel_engine_cs *engine = req->ring; | 291 | struct intel_engine_cs *engine = req->engine; |
292 | int ret; | 292 | int ret; |
293 | 293 | ||
294 | ret = intel_ring_begin(req, 4); | 294 | ret = intel_ring_begin(req, 4); |
@@ -309,7 +309,7 @@ static int | |||
309 | gen7_render_ring_flush(struct drm_i915_gem_request *req, | 309 | gen7_render_ring_flush(struct drm_i915_gem_request *req, |
310 | u32 invalidate_domains, u32 flush_domains) | 310 | u32 invalidate_domains, u32 flush_domains) |
311 | { | 311 | { |
312 | struct intel_engine_cs *engine = req->ring; | 312 | struct intel_engine_cs *engine = req->engine; |
313 | u32 flags = 0; | 313 | u32 flags = 0; |
314 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 314 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
315 | int ret; | 315 | int ret; |
@@ -373,7 +373,7 @@ static int | |||
373 | gen8_emit_pipe_control(struct drm_i915_gem_request *req, | 373 | gen8_emit_pipe_control(struct drm_i915_gem_request *req, |
374 | u32 flags, u32 scratch_addr) | 374 | u32 flags, u32 scratch_addr) |
375 | { | 375 | { |
376 | struct intel_engine_cs *engine = req->ring; | 376 | struct intel_engine_cs *engine = req->engine; |
377 | int ret; | 377 | int ret; |
378 | 378 | ||
379 | ret = intel_ring_begin(req, 6); | 379 | ret = intel_ring_begin(req, 6); |
@@ -396,7 +396,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, | |||
396 | u32 invalidate_domains, u32 flush_domains) | 396 | u32 invalidate_domains, u32 flush_domains) |
397 | { | 397 | { |
398 | u32 flags = 0; | 398 | u32 flags = 0; |
399 | u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 399 | u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
400 | int ret; | 400 | int ret; |
401 | 401 | ||
402 | flags |= PIPE_CONTROL_CS_STALL; | 402 | flags |= PIPE_CONTROL_CS_STALL; |
@@ -704,7 +704,7 @@ err: | |||
704 | static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) | 704 | static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) |
705 | { | 705 | { |
706 | int ret, i; | 706 | int ret, i; |
707 | struct intel_engine_cs *engine = req->ring; | 707 | struct intel_engine_cs *engine = req->engine; |
708 | struct drm_device *dev = engine->dev; | 708 | struct drm_device *dev = engine->dev; |
709 | struct drm_i915_private *dev_priv = dev->dev_private; | 709 | struct drm_i915_private *dev_priv = dev->dev_private; |
710 | struct i915_workarounds *w = &dev_priv->workarounds; | 710 | struct i915_workarounds *w = &dev_priv->workarounds; |
@@ -1269,7 +1269,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, | |||
1269 | unsigned int num_dwords) | 1269 | unsigned int num_dwords) |
1270 | { | 1270 | { |
1271 | #define MBOX_UPDATE_DWORDS 8 | 1271 | #define MBOX_UPDATE_DWORDS 8 |
1272 | struct intel_engine_cs *signaller = signaller_req->ring; | 1272 | struct intel_engine_cs *signaller = signaller_req->engine; |
1273 | struct drm_device *dev = signaller->dev; | 1273 | struct drm_device *dev = signaller->dev; |
1274 | struct drm_i915_private *dev_priv = dev->dev_private; | 1274 | struct drm_i915_private *dev_priv = dev->dev_private; |
1275 | struct intel_engine_cs *waiter; | 1275 | struct intel_engine_cs *waiter; |
@@ -1310,7 +1310,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, | |||
1310 | unsigned int num_dwords) | 1310 | unsigned int num_dwords) |
1311 | { | 1311 | { |
1312 | #define MBOX_UPDATE_DWORDS 6 | 1312 | #define MBOX_UPDATE_DWORDS 6 |
1313 | struct intel_engine_cs *signaller = signaller_req->ring; | 1313 | struct intel_engine_cs *signaller = signaller_req->engine; |
1314 | struct drm_device *dev = signaller->dev; | 1314 | struct drm_device *dev = signaller->dev; |
1315 | struct drm_i915_private *dev_priv = dev->dev_private; | 1315 | struct drm_i915_private *dev_priv = dev->dev_private; |
1316 | struct intel_engine_cs *waiter; | 1316 | struct intel_engine_cs *waiter; |
@@ -1348,7 +1348,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, | |||
1348 | static int gen6_signal(struct drm_i915_gem_request *signaller_req, | 1348 | static int gen6_signal(struct drm_i915_gem_request *signaller_req, |
1349 | unsigned int num_dwords) | 1349 | unsigned int num_dwords) |
1350 | { | 1350 | { |
1351 | struct intel_engine_cs *signaller = signaller_req->ring; | 1351 | struct intel_engine_cs *signaller = signaller_req->engine; |
1352 | struct drm_device *dev = signaller->dev; | 1352 | struct drm_device *dev = signaller->dev; |
1353 | struct drm_i915_private *dev_priv = dev->dev_private; | 1353 | struct drm_i915_private *dev_priv = dev->dev_private; |
1354 | struct intel_engine_cs *useless; | 1354 | struct intel_engine_cs *useless; |
@@ -1393,7 +1393,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, | |||
1393 | static int | 1393 | static int |
1394 | gen6_add_request(struct drm_i915_gem_request *req) | 1394 | gen6_add_request(struct drm_i915_gem_request *req) |
1395 | { | 1395 | { |
1396 | struct intel_engine_cs *engine = req->ring; | 1396 | struct intel_engine_cs *engine = req->engine; |
1397 | int ret; | 1397 | int ret; |
1398 | 1398 | ||
1399 | if (engine->semaphore.signal) | 1399 | if (engine->semaphore.signal) |
@@ -1434,7 +1434,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, | |||
1434 | struct intel_engine_cs *signaller, | 1434 | struct intel_engine_cs *signaller, |
1435 | u32 seqno) | 1435 | u32 seqno) |
1436 | { | 1436 | { |
1437 | struct intel_engine_cs *waiter = waiter_req->ring; | 1437 | struct intel_engine_cs *waiter = waiter_req->engine; |
1438 | struct drm_i915_private *dev_priv = waiter->dev->dev_private; | 1438 | struct drm_i915_private *dev_priv = waiter->dev->dev_private; |
1439 | int ret; | 1439 | int ret; |
1440 | 1440 | ||
@@ -1460,7 +1460,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, | |||
1460 | struct intel_engine_cs *signaller, | 1460 | struct intel_engine_cs *signaller, |
1461 | u32 seqno) | 1461 | u32 seqno) |
1462 | { | 1462 | { |
1463 | struct intel_engine_cs *waiter = waiter_req->ring; | 1463 | struct intel_engine_cs *waiter = waiter_req->engine; |
1464 | u32 dw1 = MI_SEMAPHORE_MBOX | | 1464 | u32 dw1 = MI_SEMAPHORE_MBOX | |
1465 | MI_SEMAPHORE_COMPARE | | 1465 | MI_SEMAPHORE_COMPARE | |
1466 | MI_SEMAPHORE_REGISTER; | 1466 | MI_SEMAPHORE_REGISTER; |
@@ -1508,7 +1508,7 @@ do { \ | |||
1508 | static int | 1508 | static int |
1509 | pc_render_add_request(struct drm_i915_gem_request *req) | 1509 | pc_render_add_request(struct drm_i915_gem_request *req) |
1510 | { | 1510 | { |
1511 | struct intel_engine_cs *engine = req->ring; | 1511 | struct intel_engine_cs *engine = req->engine; |
1512 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; | 1512 | u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; |
1513 | int ret; | 1513 | int ret; |
1514 | 1514 | ||
@@ -1706,7 +1706,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req, | |||
1706 | u32 invalidate_domains, | 1706 | u32 invalidate_domains, |
1707 | u32 flush_domains) | 1707 | u32 flush_domains) |
1708 | { | 1708 | { |
1709 | struct intel_engine_cs *engine = req->ring; | 1709 | struct intel_engine_cs *engine = req->engine; |
1710 | int ret; | 1710 | int ret; |
1711 | 1711 | ||
1712 | ret = intel_ring_begin(req, 2); | 1712 | ret = intel_ring_begin(req, 2); |
@@ -1722,7 +1722,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req, | |||
1722 | static int | 1722 | static int |
1723 | i9xx_add_request(struct drm_i915_gem_request *req) | 1723 | i9xx_add_request(struct drm_i915_gem_request *req) |
1724 | { | 1724 | { |
1725 | struct intel_engine_cs *engine = req->ring; | 1725 | struct intel_engine_cs *engine = req->engine; |
1726 | int ret; | 1726 | int ret; |
1727 | 1727 | ||
1728 | ret = intel_ring_begin(req, 4); | 1728 | ret = intel_ring_begin(req, 4); |
@@ -1868,7 +1868,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req, | |||
1868 | u64 offset, u32 length, | 1868 | u64 offset, u32 length, |
1869 | unsigned dispatch_flags) | 1869 | unsigned dispatch_flags) |
1870 | { | 1870 | { |
1871 | struct intel_engine_cs *engine = req->ring; | 1871 | struct intel_engine_cs *engine = req->engine; |
1872 | int ret; | 1872 | int ret; |
1873 | 1873 | ||
1874 | ret = intel_ring_begin(req, 2); | 1874 | ret = intel_ring_begin(req, 2); |
@@ -1895,7 +1895,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, | |||
1895 | u64 offset, u32 len, | 1895 | u64 offset, u32 len, |
1896 | unsigned dispatch_flags) | 1896 | unsigned dispatch_flags) |
1897 | { | 1897 | { |
1898 | struct intel_engine_cs *engine = req->ring; | 1898 | struct intel_engine_cs *engine = req->engine; |
1899 | u32 cs_offset = engine->scratch.gtt_offset; | 1899 | u32 cs_offset = engine->scratch.gtt_offset; |
1900 | int ret; | 1900 | int ret; |
1901 | 1901 | ||
@@ -1957,7 +1957,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, | |||
1957 | u64 offset, u32 len, | 1957 | u64 offset, u32 len, |
1958 | unsigned dispatch_flags) | 1958 | unsigned dispatch_flags) |
1959 | { | 1959 | { |
1960 | struct intel_engine_cs *engine = req->ring; | 1960 | struct intel_engine_cs *engine = req->engine; |
1961 | int ret; | 1961 | int ret; |
1962 | 1962 | ||
1963 | ret = intel_ring_begin(req, 2); | 1963 | ret = intel_ring_begin(req, 2); |
@@ -2187,7 +2187,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) | |||
2187 | return ERR_PTR(-ENOMEM); | 2187 | return ERR_PTR(-ENOMEM); |
2188 | } | 2188 | } |
2189 | 2189 | ||
2190 | ring->ring = engine; | 2190 | ring->engine = engine; |
2191 | list_add(&ring->link, &engine->buffers); | 2191 | list_add(&ring->link, &engine->buffers); |
2192 | 2192 | ||
2193 | ring->size = size; | 2193 | ring->size = size; |
@@ -2377,7 +2377,7 @@ int intel_ring_idle(struct intel_engine_cs *engine) | |||
2377 | 2377 | ||
2378 | int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) | 2378 | int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) |
2379 | { | 2379 | { |
2380 | request->ringbuf = request->ring->buffer; | 2380 | request->ringbuf = request->engine->buffer; |
2381 | return 0; | 2381 | return 0; |
2382 | } | 2382 | } |
2383 | 2383 | ||
@@ -2498,7 +2498,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, | |||
2498 | int ret; | 2498 | int ret; |
2499 | 2499 | ||
2500 | WARN_ON(req == NULL); | 2500 | WARN_ON(req == NULL); |
2501 | engine = req->ring; | 2501 | engine = req->engine; |
2502 | dev_priv = engine->dev->dev_private; | 2502 | dev_priv = engine->dev->dev_private; |
2503 | 2503 | ||
2504 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, | 2504 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, |
@@ -2517,7 +2517,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, | |||
2517 | /* Align the ring tail to a cacheline boundary */ | 2517 | /* Align the ring tail to a cacheline boundary */ |
2518 | int intel_ring_cacheline_align(struct drm_i915_gem_request *req) | 2518 | int intel_ring_cacheline_align(struct drm_i915_gem_request *req) |
2519 | { | 2519 | { |
2520 | struct intel_engine_cs *engine = req->ring; | 2520 | struct intel_engine_cs *engine = req->engine; |
2521 | int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); | 2521 | int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); |
2522 | int ret; | 2522 | int ret; |
2523 | 2523 | ||
@@ -2589,7 +2589,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, | |||
2589 | static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, | 2589 | static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, |
2590 | u32 invalidate, u32 flush) | 2590 | u32 invalidate, u32 flush) |
2591 | { | 2591 | { |
2592 | struct intel_engine_cs *engine = req->ring; | 2592 | struct intel_engine_cs *engine = req->engine; |
2593 | uint32_t cmd; | 2593 | uint32_t cmd; |
2594 | int ret; | 2594 | int ret; |
2595 | 2595 | ||
@@ -2636,7 +2636,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, | |||
2636 | u64 offset, u32 len, | 2636 | u64 offset, u32 len, |
2637 | unsigned dispatch_flags) | 2637 | unsigned dispatch_flags) |
2638 | { | 2638 | { |
2639 | struct intel_engine_cs *engine = req->ring; | 2639 | struct intel_engine_cs *engine = req->engine; |
2640 | bool ppgtt = USES_PPGTT(engine->dev) && | 2640 | bool ppgtt = USES_PPGTT(engine->dev) && |
2641 | !(dispatch_flags & I915_DISPATCH_SECURE); | 2641 | !(dispatch_flags & I915_DISPATCH_SECURE); |
2642 | int ret; | 2642 | int ret; |
@@ -2662,7 +2662,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, | |||
2662 | u64 offset, u32 len, | 2662 | u64 offset, u32 len, |
2663 | unsigned dispatch_flags) | 2663 | unsigned dispatch_flags) |
2664 | { | 2664 | { |
2665 | struct intel_engine_cs *engine = req->ring; | 2665 | struct intel_engine_cs *engine = req->engine; |
2666 | int ret; | 2666 | int ret; |
2667 | 2667 | ||
2668 | ret = intel_ring_begin(req, 2); | 2668 | ret = intel_ring_begin(req, 2); |
@@ -2687,7 +2687,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, | |||
2687 | u64 offset, u32 len, | 2687 | u64 offset, u32 len, |
2688 | unsigned dispatch_flags) | 2688 | unsigned dispatch_flags) |
2689 | { | 2689 | { |
2690 | struct intel_engine_cs *engine = req->ring; | 2690 | struct intel_engine_cs *engine = req->engine; |
2691 | int ret; | 2691 | int ret; |
2692 | 2692 | ||
2693 | ret = intel_ring_begin(req, 2); | 2693 | ret = intel_ring_begin(req, 2); |
@@ -2710,7 +2710,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, | |||
2710 | static int gen6_ring_flush(struct drm_i915_gem_request *req, | 2710 | static int gen6_ring_flush(struct drm_i915_gem_request *req, |
2711 | u32 invalidate, u32 flush) | 2711 | u32 invalidate, u32 flush) |
2712 | { | 2712 | { |
2713 | struct intel_engine_cs *engine = req->ring; | 2713 | struct intel_engine_cs *engine = req->engine; |
2714 | struct drm_device *dev = engine->dev; | 2714 | struct drm_device *dev = engine->dev; |
2715 | uint32_t cmd; | 2715 | uint32_t cmd; |
2716 | int ret; | 2716 | int ret; |
@@ -2756,7 +2756,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, | |||
2756 | int intel_init_render_ring_buffer(struct drm_device *dev) | 2756 | int intel_init_render_ring_buffer(struct drm_device *dev) |
2757 | { | 2757 | { |
2758 | struct drm_i915_private *dev_priv = dev->dev_private; | 2758 | struct drm_i915_private *dev_priv = dev->dev_private; |
2759 | struct intel_engine_cs *engine = &dev_priv->ring[RCS]; | 2759 | struct intel_engine_cs *engine = &dev_priv->engine[RCS]; |
2760 | struct drm_i915_gem_object *obj; | 2760 | struct drm_i915_gem_object *obj; |
2761 | int ret; | 2761 | int ret; |
2762 | 2762 | ||
@@ -2907,7 +2907,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
2907 | int intel_init_bsd_ring_buffer(struct drm_device *dev) | 2907 | int intel_init_bsd_ring_buffer(struct drm_device *dev) |
2908 | { | 2908 | { |
2909 | struct drm_i915_private *dev_priv = dev->dev_private; | 2909 | struct drm_i915_private *dev_priv = dev->dev_private; |
2910 | struct intel_engine_cs *engine = &dev_priv->ring[VCS]; | 2910 | struct intel_engine_cs *engine = &dev_priv->engine[VCS]; |
2911 | 2911 | ||
2912 | engine->name = "bsd ring"; | 2912 | engine->name = "bsd ring"; |
2913 | engine->id = VCS; | 2913 | engine->id = VCS; |
@@ -2984,7 +2984,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) | |||
2984 | int intel_init_bsd2_ring_buffer(struct drm_device *dev) | 2984 | int intel_init_bsd2_ring_buffer(struct drm_device *dev) |
2985 | { | 2985 | { |
2986 | struct drm_i915_private *dev_priv = dev->dev_private; | 2986 | struct drm_i915_private *dev_priv = dev->dev_private; |
2987 | struct intel_engine_cs *engine = &dev_priv->ring[VCS2]; | 2987 | struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; |
2988 | 2988 | ||
2989 | engine->name = "bsd2 ring"; | 2989 | engine->name = "bsd2 ring"; |
2990 | engine->id = VCS2; | 2990 | engine->id = VCS2; |
@@ -3015,7 +3015,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) | |||
3015 | int intel_init_blt_ring_buffer(struct drm_device *dev) | 3015 | int intel_init_blt_ring_buffer(struct drm_device *dev) |
3016 | { | 3016 | { |
3017 | struct drm_i915_private *dev_priv = dev->dev_private; | 3017 | struct drm_i915_private *dev_priv = dev->dev_private; |
3018 | struct intel_engine_cs *engine = &dev_priv->ring[BCS]; | 3018 | struct intel_engine_cs *engine = &dev_priv->engine[BCS]; |
3019 | 3019 | ||
3020 | engine->name = "blitter ring"; | 3020 | engine->name = "blitter ring"; |
3021 | engine->id = BCS; | 3021 | engine->id = BCS; |
@@ -3073,7 +3073,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) | |||
3073 | int intel_init_vebox_ring_buffer(struct drm_device *dev) | 3073 | int intel_init_vebox_ring_buffer(struct drm_device *dev) |
3074 | { | 3074 | { |
3075 | struct drm_i915_private *dev_priv = dev->dev_private; | 3075 | struct drm_i915_private *dev_priv = dev->dev_private; |
3076 | struct intel_engine_cs *engine = &dev_priv->ring[VECS]; | 3076 | struct intel_engine_cs *engine = &dev_priv->engine[VECS]; |
3077 | 3077 | ||
3078 | engine->name = "video enhancement ring"; | 3078 | engine->name = "video enhancement ring"; |
3079 | engine->id = VECS; | 3079 | engine->id = VECS; |
@@ -3125,7 +3125,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) | |||
3125 | int | 3125 | int |
3126 | intel_ring_flush_all_caches(struct drm_i915_gem_request *req) | 3126 | intel_ring_flush_all_caches(struct drm_i915_gem_request *req) |
3127 | { | 3127 | { |
3128 | struct intel_engine_cs *engine = req->ring; | 3128 | struct intel_engine_cs *engine = req->engine; |
3129 | int ret; | 3129 | int ret; |
3130 | 3130 | ||
3131 | if (!engine->gpu_caches_dirty) | 3131 | if (!engine->gpu_caches_dirty) |
@@ -3144,7 +3144,7 @@ intel_ring_flush_all_caches(struct drm_i915_gem_request *req) | |||
3144 | int | 3144 | int |
3145 | intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req) | 3145 | intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req) |
3146 | { | 3146 | { |
3147 | struct intel_engine_cs *engine = req->ring; | 3147 | struct intel_engine_cs *engine = req->engine; |
3148 | uint32_t flush_domains; | 3148 | uint32_t flush_domains; |
3149 | int ret; | 3149 | int ret; |
3150 | 3150 | ||
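The intel_ringbuffer.c hunks are almost all the same mechanical change: emit functions that used to open with "engine = req->ring" now open with "engine = req->engine". The distilled shape of such a function is sketched below, with intel_ring_begin()/intel_ring_emit() reduced to stubs whose signatures and behaviour are invented for the sketch, not the real API:

#include <stdint.h>

struct intel_engine_cs { int gpu_caches_dirty; };
struct drm_i915_gem_request { struct intel_engine_cs *engine; };

static int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
        (void)req; (void)num_dwords;
        return 0;                       /* pretend space was reserved */
}

static void intel_ring_emit(struct drm_i915_gem_request *req, uint32_t dw)
{
        (void)req; (void)dw;            /* real code writes the ringbuffer */
}

static int flush_all_caches(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine;   /* was req->ring */
        int ret;

        if (!engine->gpu_caches_dirty)
                return 0;

        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;

        intel_ring_emit(req, 0);        /* flush command would go here */
        intel_ring_emit(req, 0);        /* MI_NOOP padding */

        engine->gpu_caches_dirty = 0;
        return 0;
}

int main(void)
{
        struct intel_engine_cs rcs = { .gpu_caches_dirty = 1 };
        struct drm_i915_gem_request req = { .engine = &rcs };

        return flush_all_caches(&req);
}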
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 48484639c9da..4b0114e3c467 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -99,7 +99,7 @@ struct intel_ringbuffer { | |||
99 | void __iomem *virtual_start; | 99 | void __iomem *virtual_start; |
100 | struct i915_vma *vma; | 100 | struct i915_vma *vma; |
101 | 101 | ||
102 | struct intel_engine_cs *ring; | 102 | struct intel_engine_cs *engine; |
103 | struct list_head link; | 103 | struct list_head link; |
104 | 104 | ||
105 | u32 head; | 105 | u32 head; |
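Finally, the intel_ringbuffer.h hunk is the root of the whole change: the back-pointer member itself is renamed, which is what retires the awkward "ring->ring = engine" assignment seen in the intel_engine_create_ringbuffer() hunk above. A compressed, compilable view, with unrelated members elided and types simplified:

#include <stdint.h>

struct intel_engine_cs { int id; };

struct intel_ringbuffer {
        void *virtual_start;
        struct intel_engine_cs *engine; /* was: struct intel_engine_cs *ring */
        uint32_t head, tail, size;
};

/* Mirrors intel_engine_create_ringbuffer() above: the buffer gets a
 * back-pointer to its engine at creation time. */
static void link_ringbuffer(struct intel_ringbuffer *ring,
                            struct intel_engine_cs *engine)
{
        ring->engine = engine;          /* was: ring->ring = engine */
}

int main(void)
{
        struct intel_engine_cs rcs = { .id = 0 };
        struct intel_ringbuffer rb = { 0 };

        link_ringbuffer(&rb, &rcs);
        return rb.engine == &rcs ? 0 : 1;
}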