Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/amd/amdgpu/amdgpu.h             |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c          |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c         | 10 +++++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c       | 23 ++++++++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c        |  4 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c          | 43 +++++++++++++++++++++++++++++--------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c       |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c        | 10 ++++++++--
 drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h |  8 ++++----
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c   | 37 +++++++++++++------------------------
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h   | 20 +++++++++-----------
 11 files changed, 87 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9108b7c7d4a3..57b427f958da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -433,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
 
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
@@ -891,7 +891,7 @@ struct amdgpu_ring {
 	struct amdgpu_device		*adev;
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
-	struct amd_gpu_scheduler	*sched;
+	struct amd_gpu_scheduler	sched;
 
 	spinlock_t		fence_lock;
 	struct mutex		*ring_lock;
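
The key change in this header: the ring now embeds its scheduler as a struct member instead of holding a pointer to a separately allocated one. The hunks below rely on this to recover the owning ring from a scheduler pointer with container_of(), replacing the old void *priv back-pointer. A minimal stand-alone sketch of that idiom, using simplified toy_* types (illustrative only, not the driver's):

	#include <stddef.h>
	#include <stdio.h>

	/* userspace stand-in for the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct toy_scheduler {
		const char *name;
	};

	struct toy_ring {
		unsigned idx;
		struct toy_scheduler sched;	/* embedded, as in this patch */
	};

	int main(void)
	{
		struct toy_ring ring = { .idx = 3, .sched = { .name = "gfx" } };
		struct toy_scheduler *s = &ring.sched;

		/* recover the owning ring from the member, no back-pointer needed */
		struct toy_ring *owner = container_of(s, struct toy_ring, sched);

		printf("ring %u owns scheduler %s\n", owner->idx, s->name);
		return 0;
	}
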
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6f39b2d2106d..b74b6a8e80a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -848,7 +848,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 	if (!job)
 		return -ENOMEM;
-	job->base.sched = ring->sched;
+	job->base.sched = &ring->sched;
 	job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
 	job->adev = parser->adev;
 	job->ibs = parser->ibs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 5494831e1a24..e0b80ccdfe8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amd_sched_rq *rq;
 		if (kernel)
-			rq = &adev->rings[i]->sched->kernel_rq;
+			rq = &adev->rings[i]->sched.kernel_rq;
 		else
-			rq = &adev->rings[i]->sched->sched_rq;
-		r = amd_sched_entity_init(adev->rings[i]->sched,
+			rq = &adev->rings[i]->sched.sched_rq;
+		r = amd_sched_entity_init(&adev->rings[i]->sched,
 					  &ctx->rings[i].entity,
 					  rq, amdgpu_sched_jobs);
 		if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 
 	if (i < adev->num_rings) {
 		for (j = 0; j < i; j++)
-			amd_sched_entity_fini(adev->rings[j]->sched,
+			amd_sched_entity_fini(&adev->rings[j]->sched,
 					      &ctx->rings[j].entity);
 		kfree(ctx);
 		return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 
 	if (amdgpu_enable_scheduler) {
 		for (i = 0; i < adev->num_rings; i++)
-			amd_sched_entity_fini(adev->rings[i]->sched,
+			amd_sched_entity_fini(&adev->rings[i]->sched,
 					      &ctx->rings[i].entity);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 7f2d85e7e77a..b3fc26c59787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
  * Init the fence driver for the requested ring (all asics).
  * Helper function for amdgpu_fence_driver_init().
  */
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 {
-	int i;
+	int i, r;
 
 	ring->fence_drv.cpu_addr = NULL;
 	ring->fence_drv.gpu_addr = 0;
@@ -628,14 +628,16 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 	init_waitqueue_head(&ring->fence_drv.fence_queue);
 
 	if (amdgpu_enable_scheduler) {
-		ring->sched = amd_sched_create(&amdgpu_sched_ops,
-					       ring->idx,
-					       amdgpu_sched_hw_submission,
-					       (void *)ring->adev);
-		if (!ring->sched)
-			DRM_ERROR("Failed to create scheduler on ring %d.\n",
-				  ring->idx);
+		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+				   amdgpu_sched_hw_submission, ring->name);
+		if (r) {
+			DRM_ERROR("Failed to create scheduler on ring %s.\n",
+				  ring->name);
+			return r;
+		}
 	}
+
+	return 0;
 }
 
 /**
@@ -683,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		wake_up_all(&ring->fence_drv.fence_queue);
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
-		if (ring->sched)
-			amd_sched_destroy(ring->sched);
+		amd_sched_fini(&ring->sched);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 6e735431676d..30dce235ddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -357,7 +357,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		ring->adev = adev;
 		ring->idx = adev->num_rings++;
 		adev->rings[ring->idx] = ring;
-		amdgpu_fence_driver_init_ring(ring);
+		r = amdgpu_fence_driver_init_ring(ring);
+		if (r)
+			return r;
 	}
 
 	r = amdgpu_wb_get(adev, &ring->rptr_offs);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 7cf5405afe4e..e90712443fe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
 	struct amd_sched_fence *s_fence;
 
 	s_fence = to_amd_sched_fence(f);
-	if (s_fence)
-		return s_fence->sched->ring_id;
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		return ring->idx;
+	}
+
 	a_fence = to_amdgpu_fence(f);
 	if (a_fence)
 		return a_fence->ring->idx;
@@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 }
 
 #if defined(CONFIG_DEBUG_FS)
+
+static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
+
+	if (a_fence)
+		seq_printf(m, " protected by 0x%016llx on ring %d",
+			   a_fence->seq, a_fence->ring->idx);
+
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		seq_printf(m, " protected by 0x%016x on ring %d",
+			   s_fence->base.seqno, ring->idx);
+	}
+}
+
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 				  struct seq_file *m)
 {
@@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 		}
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
-		if (i->fence) {
-			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
-			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
-			if (a_fence)
-				seq_printf(m, " protected by 0x%016llx on ring %d",
-					   a_fence->seq, a_fence->ring->idx);
-			if (s_fence)
-				seq_printf(m, " protected by 0x%016x on ring %d",
-					   s_fence->base.seqno,
-					   s_fence->sched->ring_id);
-
-		}
+		if (i->fence)
+			amdgpu_sa_bo_dump_fence(i->fence, m);
 		seq_printf(m, "\n");
 	}
 	spin_unlock(&sa_manager->wq.lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index d1984fc5dfc4..2e946b2cad88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -85,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 		kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 	if (!job)
 		return -ENOMEM;
-	job->base.sched = ring->sched;
+	job->base.sched = &ring->sched;
 	job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
 	job->adev = adev;
 	job->ibs = ibs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index b57ca10a8533..4921de15b451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 
 	if (a_fence)
 		return a_fence->ring->adev == adev;
-	if (s_fence)
-		return (struct amdgpu_device *)s_fence->sched->priv == adev;
+
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		return ring->adev == adev;
+	}
+
 	return false;
 }
 
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index a1f4ece58a24..144f50acc971 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,21 +16,21 @@ TRACE_EVENT(amd_sched_job,
 	    TP_ARGS(sched_job),
 	    TP_STRUCT__entry(
 			     __field(struct amd_sched_entity *, entity)
-			     __field(u32, ring_id)
+			     __field(const char *, name)
 			     __field(u32, job_count)
 			     __field(int, hw_job_count)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->entity = sched_job->s_entity;
-			   __entry->ring_id = sched_job->sched->ring_id;
+			   __entry->name = sched_job->sched->name;
 			   __entry->job_count = kfifo_len(
 				   &sched_job->s_entity->job_queue) / sizeof(sched_job);
 			   __entry->hw_job_count = atomic_read(
 				   &sched_job->sched->hw_rq_count);
 			   ),
-	    TP_printk("entity=%p, ring=%u, job count:%u, hw job count:%d",
-		      __entry->entity, __entry->ring_id, __entry->job_count,
+	    TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->name, __entry->job_count,
 		      __entry->hw_job_count)
 );
 #endif
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ec4842e58fd7..3697eeeecf82 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -381,56 +381,45 @@ static int amd_sched_main(void *param)
 }
 
 /**
- * Create a gpu scheduler
+ * Init a gpu scheduler instance
  *
+ * @sched		The pointer to the scheduler
  * @ops			The backend operations for this scheduler.
- * @ring		The the ring id for the scheduler.
  * @hw_submissions	Number of hw submissions to do.
+ * @name		Name used for debugging
  *
- * Return the pointer to scheduler for success, otherwise return NULL
+ * Return 0 on success, otherwise error code.
 */
-struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
-					   unsigned ring, unsigned hw_submission,
-					   void *priv)
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+		   struct amd_sched_backend_ops *ops,
+		   unsigned hw_submission, const char *name)
 {
-	struct amd_gpu_scheduler *sched;
-
-	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
-	if (!sched)
-		return NULL;
-
 	sched->ops = ops;
-	sched->ring_id = ring;
 	sched->hw_submission_limit = hw_submission;
-	sched->priv = priv;
-	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
+	sched->name = name;
 	amd_sched_rq_init(&sched->sched_rq);
 	amd_sched_rq_init(&sched->kernel_rq);
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	atomic_set(&sched->hw_rq_count, 0);
+
 	/* Each scheduler will run on a seperate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
 	if (IS_ERR(sched->thread)) {
-		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
-		kfree(sched);
-		return NULL;
+		DRM_ERROR("Failed to create scheduler for %s.\n", name);
+		return PTR_ERR(sched->thread);
 	}
 
-	return sched;
+	return 0;
 }
 
 /**
  * Destroy a gpu scheduler
  *
  * @sched	The pointer to the scheduler
- *
- * return 0 if succeed. -1 if failed.
 */
-int amd_sched_destroy(struct amd_gpu_scheduler *sched)
+void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
 	kthread_stop(sched->thread);
-	kfree(sched);
-	return 0;
 }
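
The hunk above is the standard kernel create/destroy to init/fini conversion: the scheduler now lives in caller-owned storage (embedded in amdgpu_ring), so init no longer allocates, failure can only come from real setup steps such as kthread_run(), and teardown becomes a void operation with no kfree(). A brief stand-alone sketch of the pattern under hypothetical widget_* names (not from this driver):

	#include <errno.h>
	#include <stdio.h>

	struct widget {
		const char *name;
		int running;
	};

	/* before: widget_create() would allocate and return NULL on failure */
	static int widget_init(struct widget *w, const char *name)
	{
		if (!name)
			return -EINVAL;	/* only genuine setup errors remain */
		w->name = name;
		w->running = 1;
		return 0;
	}

	/* before: widget_destroy() had to free and report a status */
	static void widget_fini(struct widget *w)
	{
		w->running = 0;
	}

	struct owner {
		struct widget w;	/* embedded: storage freed with its owner */
	};

	int main(void)
	{
		struct owner o;

		if (widget_init(&o.w, "gfx"))
			return 1;
		printf("%s running=%d\n", o.w.name, o.w.running);
		widget_fini(&o.w);
		return 0;
	}
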
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 89d977dd30ac..80b64dc22214 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -101,23 +101,21 @@ struct amd_sched_backend_ops {
  * One scheduler is implemented for each hardware ring
 */
 struct amd_gpu_scheduler {
-	struct task_struct		*thread;
+	struct amd_sched_backend_ops	*ops;
+	uint32_t			hw_submission_limit;
+	const char			*name;
 	struct amd_sched_rq		sched_rq;
 	struct amd_sched_rq		kernel_rq;
-	atomic_t			hw_rq_count;
-	struct amd_sched_backend_ops	*ops;
-	uint32_t			ring_id;
 	wait_queue_head_t		wake_up_worker;
 	wait_queue_head_t		job_scheduled;
-	uint32_t			hw_submission_limit;
-	char				name[20];
-	void				*priv;
+	atomic_t			hw_rq_count;
+	struct task_struct		*thread;
 };
 
-struct amd_gpu_scheduler *
-amd_sched_create(struct amd_sched_backend_ops *ops,
-		 uint32_t ring, uint32_t hw_submission, void *priv);
-int amd_sched_destroy(struct amd_gpu_scheduler *sched);
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+		   struct amd_sched_backend_ops *ops,
+		   uint32_t hw_submission, const char *name);
+void amd_sched_fini(struct amd_gpu_scheduler *sched);
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity,