about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2017-11-20 05:20:02 -0500
committerChris Wilson <chris@chris-wilson.co.uk>2017-11-20 10:56:16 -0500
commit3fef5cda970124a15c553c1672d800e40fc08a9e (patch)
tree6ba9c24bca258c8e3bab90a01d29fdfa4fdb9fc1
parent2113184c6f6749f6e4e86a42894f67a50ead6775 (diff)
drm/i915: Automatic i915_switch_context for legacy
During request construction, after pinning the context we know whether or not we have to emit a context switch. So move this common operation from every caller into i915_gem_request_alloc() itself.

v2: Always submit the request if we emitted some commands during request construction, as typically it also involves changes in global state.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171120102002.22254-2-chris@chris-wilson.co.uk
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c4
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c3
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c10
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_request.c10
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c5
10 files changed, 14 insertions, 43 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 61ba321e9970..e07eb0beef13 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5045,7 +5045,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
5045 goto out_ctx; 5045 goto out_ctx;
5046 } 5046 }
5047 5047
5048 err = i915_switch_context(rq); 5048 err = 0;
5049 if (engine->init_context) 5049 if (engine->init_context)
5050 err = engine->init_context(rq); 5050 err = engine->init_context(rq);
5051 5051
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 2db040695035..c1efbaf02bf2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -842,8 +842,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
842 struct intel_engine_cs *engine = req->engine; 842 struct intel_engine_cs *engine = req->engine;
843 843
844 lockdep_assert_held(&req->i915->drm.struct_mutex); 844 lockdep_assert_held(&req->i915->drm.struct_mutex);
845 if (i915_modparams.enable_execlists) 845 GEM_BUG_ON(i915_modparams.enable_execlists);
846 return 0;
847 846
848 if (!req->ctx->engine[engine->id].state) { 847 if (!req->ctx->engine[engine->id].state) {
849 struct i915_gem_context *to = req->ctx; 848 struct i915_gem_context *to = req->ctx;
@@ -899,7 +898,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
899 898
900 for_each_engine(engine, dev_priv, id) { 899 for_each_engine(engine, dev_priv, id) {
901 struct drm_i915_gem_request *req; 900 struct drm_i915_gem_request *req;
902 int ret;
903 901
904 if (engine_has_idle_kernel_context(engine)) 902 if (engine_has_idle_kernel_context(engine))
905 continue; 903 continue;
@@ -922,10 +920,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
922 GFP_KERNEL); 920 GFP_KERNEL);
923 } 921 }
924 922
925 ret = i915_switch_context(req);
926 i915_add_request(req); 923 i915_add_request(req);
927 if (ret)
928 return ret;
929 } 924 }
930 925
931 return 0; 926 return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b7895788bc75..14d9e61a1e06 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1111,10 +1111,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
1111 if (err) 1111 if (err)
1112 goto err_request; 1112 goto err_request;
1113 1113
1114 err = i915_switch_context(rq);
1115 if (err)
1116 goto err_request;
1117
1118 err = eb->engine->emit_bb_start(rq, 1114 err = eb->engine->emit_bb_start(rq,
1119 batch->node.start, PAGE_SIZE, 1115 batch->node.start, PAGE_SIZE,
1120 cache->gen > 5 ? 0 : I915_DISPATCH_SECURE); 1116 cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
@@ -1960,10 +1956,6 @@ static int eb_submit(struct i915_execbuffer *eb)
1960 if (err) 1956 if (err)
1961 return err; 1957 return err;
1962 1958
1963 err = i915_switch_context(eb->request);
1964 if (err)
1965 return err;
1966
1967 if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) { 1959 if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
1968 err = i915_reset_gen7_sol_offsets(eb->request); 1960 err = i915_reset_gen7_sol_offsets(eb->request);
1969 if (err) 1961 if (err)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 91eae1b20c42..86e2346357cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -624,6 +624,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
624 if (ret) 624 if (ret)
625 goto err_unpin; 625 goto err_unpin;
626 626
627 ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
628 if (ret)
629 goto err_unreserve;
630
627 /* Move the oldest request to the slab-cache (if not in use!) */ 631 /* Move the oldest request to the slab-cache (if not in use!) */
628 req = list_first_entry_or_null(&engine->timeline->requests, 632 req = list_first_entry_or_null(&engine->timeline->requests,
629 typeof(*req), link); 633 typeof(*req), link);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0f48e666098d..fd150099978c 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1726,10 +1726,9 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
1726 GFP_KERNEL); 1726 GFP_KERNEL);
1727 } 1727 }
1728 1728
1729 ret = i915_switch_context(req);
1730 i915_add_request(req); 1729 i915_add_request(req);
1731 1730
1732 return ret; 1731 return 0;
1733} 1732}
1734 1733
1735/* 1734/*
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 12e734b29463..be98868115bf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1592,6 +1592,10 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
1592 if (ret) 1592 if (ret)
1593 return ret; 1593 return ret;
1594 1594
1595 ret = i915_switch_context(request);
1596 if (ret)
1597 return ret;
1598
1595 request->reserved_space -= LEGACY_REQUEST_SIZE; 1599 request->reserved_space -= LEGACY_REQUEST_SIZE;
1596 return 0; 1600 return 0;
1597} 1601}
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 159a2cb68765..db7a0a1f2960 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -989,13 +989,9 @@ static int gpu_write(struct i915_vma *vma,
989 i915_vma_unpin(batch); 989 i915_vma_unpin(batch);
990 i915_vma_close(batch); 990 i915_vma_close(batch);
991 991
992 err = i915_switch_context(rq); 992 err = engine->emit_bb_start(rq,
993 if (err) 993 batch->node.start, batch->node.size,
994 goto err_request; 994 flags);
995
996 err = rq->engine->emit_bb_start(rq,
997 batch->node.start, batch->node.size,
998 flags);
999 if (err) 995 if (err)
1000 goto err_request; 996 goto err_request;
1001 997
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 4ff30b9af1fe..09340b3c1156 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -158,10 +158,6 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
158 goto err_batch; 158 goto err_batch;
159 } 159 }
160 160
161 err = i915_switch_context(rq);
162 if (err)
163 goto err_request;
164
165 flags = 0; 161 flags = 0;
166 if (INTEL_GEN(vm->i915) <= 5) 162 if (INTEL_GEN(vm->i915) <= 5)
167 flags |= I915_DISPATCH_SECURE; 163 flags |= I915_DISPATCH_SECURE;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index d7bf53ff8f84..647bf2bbd799 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -459,10 +459,6 @@ empty_request(struct intel_engine_cs *engine,
459 if (IS_ERR(request)) 459 if (IS_ERR(request))
460 return request; 460 return request;
461 461
462 err = i915_switch_context(request);
463 if (err)
464 goto out_request;
465
466 err = engine->emit_bb_start(request, 462 err = engine->emit_bb_start(request,
467 batch->node.start, 463 batch->node.start,
468 batch->node.size, 464 batch->node.size,
@@ -671,9 +667,6 @@ static int live_all_engines(void *arg)
671 goto out_request; 667 goto out_request;
672 } 668 }
673 669
674 err = i915_switch_context(request[id]);
675 GEM_BUG_ON(err);
676
677 err = engine->emit_bb_start(request[id], 670 err = engine->emit_bb_start(request[id],
678 batch->node.start, 671 batch->node.start,
679 batch->node.size, 672 batch->node.size,
@@ -790,9 +783,6 @@ static int live_sequential_engines(void *arg)
790 } 783 }
791 } 784 }
792 785
793 err = i915_switch_context(request[id]);
794 GEM_BUG_ON(err);
795
796 err = engine->emit_bb_start(request[id], 786 err = engine->emit_bb_start(request[id],
797 batch->node.start, 787 batch->node.start,
798 batch->node.size, 788 batch->node.size,
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 145bdc26553c..1bbb8c46e2d9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -114,10 +114,6 @@ static int emit_recurse_batch(struct hang *h,
114 if (err) 114 if (err)
115 goto unpin_vma; 115 goto unpin_vma;
116 116
117 err = i915_switch_context(rq);
118 if (err)
119 goto unpin_hws;
120
121 i915_vma_move_to_active(vma, rq, 0); 117 i915_vma_move_to_active(vma, rq, 0);
122 if (!i915_gem_object_has_active_reference(vma->obj)) { 118 if (!i915_gem_object_has_active_reference(vma->obj)) {
123 i915_gem_object_get(vma->obj); 119 i915_gem_object_get(vma->obj);
@@ -169,7 +165,6 @@ static int emit_recurse_batch(struct hang *h,
169 165
170 err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); 166 err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
171 167
172unpin_hws:
173 i915_vma_unpin(hws); 168 i915_vma_unpin(hws);
174unpin_vma: 169unpin_vma:
175 i915_vma_unpin(vma); 170 i915_vma_unpin(vma);