 drivers/gpu/drm/i915/i915_debugfs.c     |  9
 drivers/gpu/drm/i915/i915_drv.h         |  2
 drivers/gpu/drm/i915/i915_gem.c         | 32
 drivers/gpu/drm/i915/i915_gem_context.c |  3
 drivers/gpu/drm/i915/i915_gpu_error.c   |  3
 drivers/gpu/drm/i915/intel_display.c    |  7
 6 files changed, 33 insertions(+), 23 deletions(-)
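
Note: every obj->ring read removed below is replaced by deriving the engine from the object's last_read_req (or from last_write_req in the page-flip bookkeeping). The i915_gem_request_get_ring() helper that all the new call sites rely on is not part of this diff; as a sketch only, it is assumed to dereference the request's ring pointer and to return NULL for a NULL request, which is what lets callers such as use_mmio_flip() and i915_gem_object_sync() skip a separate NULL check:

/* Sketch, not taken from this patch: assumed shape of the helper in i915_drv.h. */
static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
	/* A NULL request means the object is idle, so report no engine. */
	return req ? req->ring : NULL;
}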
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a76aa31dd1cf..b2dca46989e0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -166,8 +166,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		*t = '\0';
 		seq_printf(m, " (%s mappable)", s);
 	}
-	if (obj->ring != NULL)
-		seq_printf(m, " (%s)", obj->ring->name);
+	if (obj->last_read_req != NULL)
+		seq_printf(m, " (%s)",
+			   i915_gem_request_get_ring(obj->last_read_req)->name);
 	if (obj->frontbuffer_bits)
 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
@@ -334,7 +335,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 			if (ppgtt->file_priv != stats->file_priv)
 				continue;
 
-			if (obj->ring) /* XXX per-vma statistic */
+			if (obj->active) /* XXX per-vma statistic */
 				stats->active += obj->base.size;
 			else
 				stats->inactive += obj->base.size;
@@ -344,7 +345,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 	} else {
 		if (i915_gem_obj_ggtt_bound(obj)) {
 			stats->global += obj->base.size;
-			if (obj->ring)
+			if (obj->active)
 				stats->active += obj->base.size;
 			else
 				stats->inactive += obj->base.size;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 04fb96e67093..97804a3be67c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1941,8 +1941,6 @@ struct drm_i915_gem_object {
 	void *dma_buf_vmapping;
 	int vmapping_count;
 
-	struct intel_engine_cs *ring;
-
 	/** Breadcrumb of last rendering to the buffer. */
 	struct drm_i915_gem_request *last_read_req;
 	struct drm_i915_gem_request *last_write_req;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 16a8445403a7..b9222a76e52f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2263,14 +2263,18 @@ static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_engine_cs *ring)
 {
-	struct drm_i915_gem_request *req = intel_ring_get_request(ring);
+	struct drm_i915_gem_request *req;
+	struct intel_engine_cs *old_ring;
 
 	BUG_ON(ring == NULL);
-	if (obj->ring != ring && obj->last_write_req) {
+
+	req = intel_ring_get_request(ring);
+	old_ring = i915_gem_request_get_ring(obj->last_read_req);
+
+	if (old_ring != ring && obj->last_write_req) {
 		/* Keep the request relative to the current ring */
 		i915_gem_request_assign(&obj->last_write_req, req);
 	}
-	obj->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
 	if (!obj->active) {
@@ -2309,7 +2313,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	intel_fb_obj_flush(obj, true);
 
 	list_del_init(&obj->ring_list);
-	obj->ring = NULL;
 
 	i915_gem_request_assign(&obj->last_read_req, NULL);
 	i915_gem_request_assign(&obj->last_write_req, NULL);
@@ -2326,9 +2329,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_retire(struct drm_i915_gem_object *obj)
 {
-	struct intel_engine_cs *ring = obj->ring;
-
-	if (ring == NULL)
+	if (obj->last_read_req == NULL)
 		return;
 
 	if (i915_gem_request_completed(obj->last_read_req, true))
@@ -2861,14 +2862,17 @@ i915_gem_idle_work_handler(struct work_struct *work)
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 {
+	struct intel_engine_cs *ring;
 	int ret;
 
 	if (obj->active) {
+		ring = i915_gem_request_get_ring(obj->last_read_req);
+
 		ret = i915_gem_check_olr(obj->last_read_req);
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests_ring(obj->ring);
+		i915_gem_retire_requests_ring(ring);
 	}
 
 	return 0;
@@ -2971,10 +2975,12 @@ int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		     struct intel_engine_cs *to)
 {
-	struct intel_engine_cs *from = obj->ring;
+	struct intel_engine_cs *from;
 	u32 seqno;
 	int ret, idx;
 
+	from = i915_gem_request_get_ring(obj->last_read_req);
+
 	if (from == NULL || to == from)
 		return 0;
 
@@ -3929,7 +3935,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	bool was_pin_display;
 	int ret;
 
-	if (pipelined != obj->ring) {
+	if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
 		ret = i915_gem_object_sync(obj, pipelined);
 		if (ret)
 			return ret;
@@ -4284,9 +4290,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	ret = i915_gem_object_flush_active(obj);
 
 	args->busy = obj->active;
-	if (obj->ring) {
+	if (obj->last_read_req) {
+		struct intel_engine_cs *ring;
 		BUILD_BUG_ON(I915_NUM_RINGS > 16);
-		args->busy |= intel_ring_flag(obj->ring) << 16;
+		ring = i915_gem_request_get_ring(obj->last_read_req);
+		args->busy |= intel_ring_flag(ring) << 16;
 	}
 
 	drm_gem_object_unreference(&obj->base);
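
The i915_gem_busy_ioctl() hunk above still reports which engine last read the object, packed into the upper bits of args->busy. Assuming intel_ring_flag() evaluates to (1 << ring->id), which is how the flag is defined elsewhere in the driver but is not shown in this diff, userspace could interpret the returned value along these lines:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: decoding drm_i915_gem_busy.busy as filled in by the hunk
 * above, under the assumption that intel_ring_flag() is (1 << ring->id). */
static bool busy_obj_is_active(uint32_t busy)
{
	return busy & 1;		/* bit 0 mirrors obj->active */
}

static uint32_t busy_ring_mask(uint32_t busy)
{
	return busy >> 16;		/* bit n set: last read was on the ring with id n */
}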
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d17ff435f276..3c3a9ff9eb54 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -619,7 +619,8 @@ static int do_switch(struct intel_engine_cs *ring,
 		 * swapped, but there is no way to do that yet.
 		 */
 		from->legacy_hw_ctx.rcs_state->dirty = 1;
-		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
+		BUG_ON(i915_gem_request_get_ring(
+				from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
 
 		/* obj is kept alive until the next request by its active ref */
 		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index af0ceeedda9b..c4536e185b75 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -683,7 +683,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
-	err->ring = obj->ring ? obj->ring->id : -1;
+	err->ring = obj->last_read_req ?
+			i915_gem_request_get_ring(obj->last_read_req)->id : -1;
 	err->cache_level = obj->cache_level;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 92a0350c1df9..e216cb7d8729 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9528,7 +9528,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 	else if (i915.enable_execlists)
 		return true;
 	else
-		return ring != obj->ring;
+		return ring != i915_gem_request_get_ring(obj->last_read_req);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
@@ -9888,7 +9888,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	} else if (IS_IVYBRIDGE(dev)) {
 		ring = &dev_priv->ring[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
-		ring = obj->ring;
+		ring = i915_gem_request_get_ring(obj->last_read_req);
 		if (ring == NULL || ring->id != RCS)
 			ring = &dev_priv->ring[BCS];
 	} else {
@@ -9910,7 +9910,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 		i915_gem_request_assign(&work->flip_queued_req,
 					obj->last_write_req);
-		work->flip_queued_ring = obj->ring;
+		work->flip_queued_ring =
+				i915_gem_request_get_ring(obj->last_write_req);
 	} else {
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
 						   page_flip_flags);