 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |  16
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c        | 101
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |   7
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c     |  88
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c       |   2
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c       |   2
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |   2
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c |  42
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |   6
 9 files changed, 130 insertions(+), 136 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2fc58e658986..95d4969369a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -183,6 +183,7 @@ struct amdgpu_vm;
 struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 
@@ -871,7 +872,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 					 struct amdgpu_ring *ring,
 					 struct amdgpu_ib *ibs,
 					 unsigned num_ibs,
-					 int (*free_job)(struct amdgpu_cs_parser *),
+					 int (*free_job)(struct amdgpu_job *),
 					 void *owner,
 					 struct fence **fence);
 
@@ -1040,6 +1041,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
 
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx);
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 			      struct fence *fence, uint64_t queued_seq);
@@ -1265,6 +1267,18 @@ struct amdgpu_cs_parser {
 	struct amd_sched_fence	*s_fence;
 };
 
+struct amdgpu_job {
+	struct amd_sched_job	base;
+	struct amdgpu_device	*adev;
+	struct amdgpu_ctx	*ctx;
+	struct drm_file		*owner;
+	struct amdgpu_ib	*ibs;
+	uint32_t		num_ibs;
+	struct mutex		job_lock;
+	struct amdgpu_user_fence uf;
+	int (*free_job)(struct amdgpu_job *sched_job);
+};
+
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
 	return p->ibs[ib_idx].ptr[idx];
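
Note on the layout above: struct amd_sched_job is deliberately the first member (base) of
struct amdgpu_job, so a scheduler-side pointer can be converted back to the driver-side
job. The patch does this with plain casts; a container_of()-based helper, sketched below,
would express the same conversion without depending on member position. The helper name
is illustrative and not part of this patch:

static inline struct amdgpu_job *to_amdgpu_job(struct amd_sched_job *sched_job)
{
	/* assumption: 'base' is the embedded amd_sched_job shown above */
	return container_of(sched_job, struct amdgpu_job, base);
}
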
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e4424b4db5d3..c8de4b6194e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -126,19 +126,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
-static void amdgpu_job_work_func(struct work_struct *work)
-{
-	struct amdgpu_cs_parser *sched_job =
-		container_of(work, struct amdgpu_cs_parser,
-			     job_work);
-	mutex_lock(&sched_job->job_lock);
-	if (sched_job->free_job)
-		sched_job->free_job(sched_job);
-	mutex_unlock(&sched_job->job_lock);
-	/* after processing job, free memory */
-	fence_put(&sched_job->s_fence->base);
-	kfree(sched_job);
-}
 struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
                                                struct drm_file *filp,
                                                struct amdgpu_ctx *ctx,
@@ -157,10 +144,6 @@ struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
 	parser->ctx = ctx;
 	parser->ibs = ibs;
 	parser->num_ibs = num_ibs;
-	if (amdgpu_enable_scheduler) {
-		mutex_init(&parser->job_lock);
-		INIT_WORK(&parser->job_work, amdgpu_job_work_func);
-	}
 	for (i = 0; i < num_ibs; i++)
 		ibs[i].ctx = ctx;
 
@@ -508,15 +491,17 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	if (parser->ibs)
-		for (i = 0; i < parser->num_ibs; i++)
-			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-	kfree(parser->ibs);
-	if (parser->uf.bo)
-		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
-
 	if (!amdgpu_enable_scheduler)
-		kfree(parser);
+	{
+		if (parser->ibs)
+			for (i = 0; i < parser->num_ibs; i++)
+				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+		kfree(parser->ibs);
+		if (parser->uf.bo)
+			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	}
+
+	kfree(parser);
 }
 
 /**
@@ -533,12 +518,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 	amdgpu_cs_parser_fini_late(parser);
 }
 
-static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
-{
-	amdgpu_cs_parser_fini_late(sched_job);
-	return 0;
-}
-
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 				   struct amdgpu_vm *vm)
 {
@@ -874,6 +853,19 @@ static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
 	return ring;
 }
 
+static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+{
+	int i;
+	amdgpu_ctx_put(sched_job->ctx);
+	if (sched_job->ibs)
+		for (i = 0; i < sched_job->num_ibs; i++)
+			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+	kfree(sched_job->ibs);
+	if (sched_job->uf.bo)
+		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+	return 0;
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
@@ -900,33 +892,50 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	}
 
 	if (amdgpu_enable_scheduler && parser->num_ibs) {
+		struct amdgpu_job *job;
 		struct amdgpu_ring * ring =
 			amdgpu_cs_parser_get_ring(adev, parser);
 		r = amdgpu_cs_parser_prepare_job(parser);
 		if (r)
 			goto out;
-		parser->ring = ring;
-		parser->free_job = amdgpu_cs_parser_free_job;
-		mutex_lock(&parser->job_lock);
-		r = amd_sched_push_job(ring->scheduler,
-				       &parser->ctx->rings[ring->idx].entity,
-				       parser,
-				       &parser->s_fence);
+		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+		if (!job)
+			return -ENOMEM;
+		job->base.sched = ring->scheduler;
+		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
+		job->adev = parser->adev;
+		job->ibs = parser->ibs;
+		job->num_ibs = parser->num_ibs;
+		job->owner = parser->filp;
+		job->ctx = amdgpu_ctx_get_ref(parser->ctx);
+		mutex_init(&job->job_lock);
+		if (job->ibs[job->num_ibs - 1].user) {
+			memcpy(&job->uf, &parser->uf,
+			       sizeof(struct amdgpu_user_fence));
+			job->ibs[job->num_ibs - 1].user = &job->uf;
+		}
+
+		job->free_job = amdgpu_cs_free_job;
+		mutex_lock(&job->job_lock);
+		r = amd_sched_push_job((struct amd_sched_job *)job);
 		if (r) {
-			mutex_unlock(&parser->job_lock);
+			mutex_unlock(&job->job_lock);
+			amdgpu_cs_free_job(job);
+			kfree(job);
 			goto out;
 		}
-		parser->ibs[parser->num_ibs - 1].sequence =
-			amdgpu_ctx_add_fence(parser->ctx, ring,
-					     &parser->s_fence->base,
-					     parser->s_fence->v_seq);
-		cs->out.handle = parser->s_fence->v_seq;
+		job->ibs[parser->num_ibs - 1].sequence =
+			amdgpu_ctx_add_fence(job->ctx, ring,
+					     &job->base.s_fence->base,
+					     job->base.s_fence->v_seq);
+		cs->out.handle = job->base.s_fence->v_seq;
 		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
 		ttm_eu_fence_buffer_objects(&parser->ticket,
 					    &parser->validated,
-					    &parser->s_fence->base);
+					    &job->base.s_fence->base);
 
-		mutex_unlock(&parser->job_lock);
+		mutex_unlock(&job->job_lock);
+		amdgpu_cs_parser_fini_late(parser);
 		up_read(&adev->exclusive_lock);
 		return 0;
 	}
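
The rewritten submission path above transfers ownership out of the parser: the job takes
its own context reference via amdgpu_ctx_get_ref(), inherits the IB array, and copies the
user fence so the last IB's user pointer no longer references parser memory, which
amdgpu_cs_parser_fini_late() frees before the ioctl returns. A hypothetical helper
condensing that setup (error handling trimmed; the helper itself is not part of the patch):

static struct amdgpu_job *amdgpu_cs_job_from_parser(struct amdgpu_cs_parser *parser,
						    struct amdgpu_ring *ring)
{
	struct amdgpu_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return NULL;
	job->base.sched = ring->scheduler;
	job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
	job->adev = parser->adev;
	job->ibs = parser->ibs;			/* job now owns the IBs */
	job->num_ibs = parser->num_ibs;
	job->owner = parser->filp;
	job->ctx = amdgpu_ctx_get_ref(parser->ctx);	/* pin the context */
	mutex_init(&job->job_lock);
	if (job->ibs[job->num_ibs - 1].user) {
		job->uf = parser->uf;		/* copy before the parser dies */
		job->ibs[job->num_ibs - 1].user = &job->uf;
	}
	job->free_job = amdgpu_cs_free_job;	/* undoes all of the above */
	return job;
}
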
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 08bc7722ddb8..8660c0854a1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -219,6 +219,13 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 	return ctx;
 }
 
+struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx)
+{
+	if (ctx)
+		kref_get(&ctx->refcount);
+	return ctx;
+}
+
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 {
 	if (ctx == NULL)
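
amdgpu_ctx_get_ref() complements amdgpu_ctx_get(), which looks a context up by id; here
the caller already holds a valid pointer and only needs to bump the kref. Either way the
reference is dropped with amdgpu_ctx_put(), as in this sketch of the pairing used by the
CS path above:

	job->ctx = amdgpu_ctx_get_ref(parser->ctx);	/* at submit time */
	...
	amdgpu_ctx_put(sched_job->ctx);			/* in amdgpu_cs_free_job() */
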
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index a86e38158afa..5b1ae18f5e8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,81 +27,58 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
-				    struct amd_sched_entity *entity,
-				    struct amd_sched_job *job)
-{
-	int r = 0;
-	struct amdgpu_cs_parser *sched_job;
-	if (!job || !job->data) {
-		DRM_ERROR("job is null\n");
-		return -EINVAL;
-	}
-
-	sched_job = (struct amdgpu_cs_parser *)job->data;
-	if (sched_job->prepare_job) {
-		r = sched_job->prepare_job(sched_job);
-		if (r) {
-			DRM_ERROR("Prepare job error\n");
-			schedule_work(&sched_job->job_work);
-		}
-	}
-	return r;
-}
-
 static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 					  struct amd_sched_entity *entity,
 					  struct amd_sched_job *job)
 {
 	int r = 0;
-	struct amdgpu_cs_parser *sched_job;
+	struct amdgpu_job *sched_job;
 	struct amdgpu_fence *fence;
 
-	if (!job || !job->data) {
+	if (!job) {
 		DRM_ERROR("job is null\n");
 		return NULL;
 	}
-	sched_job = (struct amdgpu_cs_parser *)job->data;
+	sched_job = (struct amdgpu_job *)job;
 	mutex_lock(&sched_job->job_lock);
 	r = amdgpu_ib_schedule(sched_job->adev,
 			       sched_job->num_ibs,
 			       sched_job->ibs,
-			       sched_job->filp);
+			       sched_job->owner);
 	if (r)
 		goto err;
 	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
 
-	if (sched_job->run_job) {
-		r = sched_job->run_job(sched_job);
-		if (r)
-			goto err;
-	}
-
 	mutex_unlock(&sched_job->job_lock);
 	return &fence->base;
 
 err:
 	DRM_ERROR("Run job error\n");
 	mutex_unlock(&sched_job->job_lock);
-	schedule_work(&sched_job->job_work);
+	sched->ops->process_job(sched, (struct amd_sched_job *)sched_job);
 	return NULL;
 }
 
 static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
 				     struct amd_sched_job *job)
 {
-	struct amdgpu_cs_parser *sched_job;
+	struct amdgpu_job *sched_job;
 
-	if (!job || !job->data) {
+	if (!job) {
 		DRM_ERROR("job is null\n");
 		return;
 	}
-	sched_job = (struct amdgpu_cs_parser *)job->data;
-	schedule_work(&sched_job->job_work);
+	sched_job = (struct amdgpu_job *)job;
+	mutex_lock(&sched_job->job_lock);
+	if (sched_job->free_job)
+		sched_job->free_job(sched_job);
+	mutex_unlock(&sched_job->job_lock);
+	/* after processing job, free memory */
+	fence_put(&sched_job->base.s_fence->base);
+	kfree(sched_job);
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
-	.prepare_job = amdgpu_sched_prepare_job,
 	.run_job = amdgpu_sched_run_job,
 	.process_job = amdgpu_sched_process_job
 };
@@ -110,31 +87,34 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 					 struct amdgpu_ring *ring,
 					 struct amdgpu_ib *ibs,
 					 unsigned num_ibs,
-					 int (*free_job)(struct amdgpu_cs_parser *),
+					 int (*free_job)(struct amdgpu_job *),
 					 void *owner,
 					 struct fence **f)
 {
 	int r = 0;
 	if (amdgpu_enable_scheduler) {
-		struct amdgpu_cs_parser *sched_job =
-			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
-						ibs, num_ibs);
-		if(!sched_job) {
+		struct amdgpu_job *job =
+			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+		if (!job)
 			return -ENOMEM;
-		}
-		sched_job->free_job = free_job;
-		mutex_lock(&sched_job->job_lock);
-		r = amd_sched_push_job(ring->scheduler,
-				       &adev->kernel_ctx.rings[ring->idx].entity,
-				       sched_job, &sched_job->s_fence);
+		job->base.sched = ring->scheduler;
+		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+		job->adev = adev;
+		job->ibs = ibs;
+		job->num_ibs = num_ibs;
+		job->owner = owner;
+		mutex_init(&job->job_lock);
+		job->free_job = free_job;
+		mutex_lock(&job->job_lock);
+		r = amd_sched_push_job((struct amd_sched_job *)job);
 		if (r) {
-			mutex_unlock(&sched_job->job_lock);
-			kfree(sched_job);
+			mutex_unlock(&job->job_lock);
+			kfree(job);
 			return r;
 		}
-		ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
-		*f = fence_get(&sched_job->s_fence->base);
-		mutex_unlock(&sched_job->job_lock);
+		ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
+		*f = fence_get(&job->base.s_fence->base);
+		mutex_unlock(&job->job_lock);
 	} else {
 		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
 		if (r)
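
With the work item gone, amdgpu_sched_process_job() now frees the job synchronously from
the scheduler's fence callback, invoking the per-submission free_job hook first. Callers
of amdgpu_sched_ib_submit_kernel_helper() therefore only supply a callback that releases
their IBs, in the style of the amdgpu_uvd/vce/vm callbacks below. A minimal sketch; the
name example_free_job is illustrative, and AMDGPU_FENCE_OWNER_UNDEFINED is assumed as the
owner argument:

static int example_free_job(struct amdgpu_job *sched_job)
{
	/* release the single IB this submission allocated */
	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
	kfree(sched_job->ibs);
	return 0;
}

	/* usage (sketch): *f receives the scheduler fence */
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 example_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
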
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 68369cf1e318..b87355ccfb1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -807,7 +807,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 }
 
 static int amdgpu_uvd_free_job(
-	struct amdgpu_cs_parser *sched_job)
+	struct amdgpu_job *sched_job)
 {
 	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
 	kfree(sched_job->ibs);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 33ee6ae28f37..1a984c934b1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -340,7 +340,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 }
 
 static int amdgpu_vce_free_job(
-	struct amdgpu_cs_parser *sched_job)
+	struct amdgpu_job *sched_job)
 {
 	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
 	kfree(sched_job->ibs);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a78a206e176e..5b99214d0ba6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -307,7 +307,7 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 }
 
 static int amdgpu_vm_free_job(
-	struct amdgpu_cs_parser *sched_job)
+	struct amdgpu_job *sched_job)
 {
 	int i;
 	for (i = 0; i < sched_job->num_ibs; i++)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 265d3e2f63cc..462c1617d56e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -282,30 +282,18 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
  * scheduler consum some queued command.
  * -1 other fail.
 */
-int amd_sched_push_job(struct amd_gpu_scheduler *sched,
-		       struct amd_sched_entity *c_entity,
-		       void *data,
-		       struct amd_sched_fence **fence)
+int amd_sched_push_job(struct amd_sched_job *sched_job)
 {
-	struct amd_sched_job *job;
-
+	struct amd_sched_fence *fence =
+		amd_sched_fence_create(sched_job->s_entity);
 	if (!fence)
 		return -EINVAL;
-	job = kzalloc(sizeof(struct amd_sched_job), GFP_KERNEL);
-	if (!job)
-		return -ENOMEM;
-	job->sched = sched;
-	job->s_entity = c_entity;
-	job->data = data;
-	*fence = amd_sched_fence_create(c_entity);
-	if ((*fence) == NULL) {
-		kfree(job);
-		return -EINVAL;
-	}
-	fence_get(&(*fence)->base);
-	job->s_fence = *fence;
-	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
-				   &c_entity->queue_lock) != sizeof(void *)) {
+	fence_get(&fence->base);
+	sched_job->s_fence = fence;
+	while (kfifo_in_spinlocked(&sched_job->s_entity->job_queue,
+				   &sched_job, sizeof(void *),
+				   &sched_job->s_entity->queue_lock) !=
+	       sizeof(void *)) {
 		/**
 		 * Current context used up all its IB slots
 		 * wait here, or need to check whether GPU is hung
@@ -313,8 +301,8 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		schedule();
 	}
 	/* first job wake up scheduler */
-	if ((kfifo_len(&c_entity->job_queue) / sizeof(void *)) == 1)
-		wake_up_interruptible(&sched->wait_queue);
+	if ((kfifo_len(&sched_job->s_entity->job_queue) / sizeof(void *)) == 1)
+		wake_up_interruptible(&sched_job->sched->wait_queue);
 	return 0;
 }
 
@@ -333,10 +321,8 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	list_del(&sched_job->list);
 	atomic64_dec(&sched->hw_rq_count);
 	spin_unlock_irqrestore(&sched->queue_lock, flags);
-
-	sched->ops->process_job(sched, sched_job);
 	fence_put(&sched_job->s_fence->base);
-	kfree(sched_job);
+	sched->ops->process_job(sched, sched_job);
 	wake_up_interruptible(&sched->wait_queue);
 }
 
@@ -359,7 +345,9 @@ static int amd_sched_main(void *param)
 		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
 		if (r != sizeof(void *))
 			continue;
-		r = sched->ops->prepare_job(sched, c_entity, job);
+		r = 0;
+		if (sched->ops->prepare_job)
+			r = sched->ops->prepare_job(sched, c_entity, job);
 		if (!r) {
 			unsigned long flags;
 			spin_lock_irqsave(&sched->queue_lock, flags);
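
amd_sched_push_job() now expects a fully initialized job (sched and s_entity already set)
and attaches the fence itself; the extra fence_get() keeps s_fence alive for the caller
even if process_job frees the job right away. The caller-side locking this implies, as
the amdgpu hunks above use it (a sketch of the calling pattern, not new API):

	mutex_lock(&job->job_lock);		/* blocks process_job's free */
	r = amd_sched_push_job(&job->base);
	if (!r)
		seq = job->base.s_fence->v_seq;	/* safe: job still pinned */
	mutex_unlock(&job->job_lock);
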
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index ceb5918bfbeb..25e38d030157 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -81,7 +81,6 @@ struct amd_sched_job {
 	struct fence_cb			cb;
 	struct amd_gpu_scheduler	*sched;
 	struct amd_sched_entity		*s_entity;
-	void				*data;
 	struct amd_sched_fence		*s_fence;
 };
 
@@ -140,10 +139,7 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
 			   uint32_t hw_submission);
 int amd_sched_destroy(struct amd_gpu_scheduler *sched);
 
-int amd_sched_push_job(struct amd_gpu_scheduler *sched,
-		       struct amd_sched_entity *c_entity,
-		       void *data,
-		       struct amd_sched_fence **fence);
+int amd_sched_push_job(struct amd_sched_job *sched_job);
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity,
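
Since amd_sched_main() now treats prepare_job as optional, a backend can register only
the remaining hooks, as amdgpu does after this patch. A sketch of such a minimal ops
table; the example_* names are illustrative only:

struct amd_sched_backend_ops example_sched_ops = {
	/* .prepare_job intentionally left NULL; the core checks for it */
	.run_job     = example_run_job,
	.process_job = example_process_job,
};
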