author		Chunming Zhou <david1.zhou@amd.com>	2015-08-01 23:18:04 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2015-08-17 16:51:07 -0400
commit		f556cb0caeec1ba9b8e5e2aa85b47e76277f5d4b (patch)
tree		1325c1eb049a5a85901437743ea0c3f499f73f2d
parent		4af9f07ccdac96e16f7a0ddaf983891a29ebd11a (diff)
drm/amd: add scheduler fence implementation (v2)
scheduler fence is based on kernel fence framework.

v2: squash in Christian's build fix

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
 drivers/gpu/drm/amd/amdgpu/Makefile           |   1
 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |   1
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c        |  21
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |  10
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c        |   2
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c     |  34
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c |  26
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  33
 drivers/gpu/drm/amd/scheduler/sched_fence.c   | 112
 9 files changed, 202 insertions(+), 38 deletions(-)
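For context (not part of the patch): after this change a submitter gets the scheduler fence back from amd_sched_push_job() and can wait on it through the generic fence API instead of amd_sched_wait_emit(). Below is a minimal sketch of the new calling pattern; the helper name example_submit_and_wait is hypothetical, and it assumes the scheduler, entity and job pointer already exist and that the caller keeps the fence alive across the wait (the amdgpu CS path does this under parser->job_lock and via amdgpu_ctx_add_fence()).

/* Hypothetical helper sketching the submit-and-wait flow this patch enables. */
static int example_submit_and_wait(struct amd_gpu_scheduler *sched,
				   struct amd_sched_entity *entity,
				   void *job)
{
	struct amd_sched_fence *s_fence;
	int r;

	/* queue the job; the scheduler allocates the fence and hands it back */
	r = amd_sched_push_job(sched, entity, job, &s_fence);
	if (r)
		return r;

	/* amd_sched_fence embeds a struct fence, so the generic API applies */
	fence_get(&s_fence->base);		/* reference for the waiter */
	r = fence_wait(&s_fence->base, false);	/* uninterruptible wait */
	fence_put(&s_fence->base);

	return r;
}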
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index f1cb7d2fa411..04c270757030 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -86,6 +86,7 @@ amdgpu-y += amdgpu_cgs.o
 # GPU scheduler
 amdgpu-y += \
 	../scheduler/gpu_scheduler.o \
+	../scheduler/sched_fence.o \
 	amdgpu_sched.o
 
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 987e3075a03f..2ba448ee948b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1261,6 +1261,7 @@ struct amdgpu_cs_parser {
 	int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
 	int (*run_job)(struct amdgpu_cs_parser *sched_job);
 	int (*free_job)(struct amdgpu_cs_parser *sched_job);
+	struct amd_sched_fence *s_fence;
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b1dc7e1ed271..f428288d8363 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -899,8 +899,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (amdgpu_enable_scheduler && parser->num_ibs) {
 		struct amdgpu_ring * ring =
 			amdgpu_cs_parser_get_ring(adev, parser);
-		parser->ibs[parser->num_ibs - 1].sequence = atomic64_inc_return(
-			&parser->ctx->rings[ring->idx].entity.last_queued_v_seq);
 		if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
 			r = amdgpu_cs_parser_prepare_job(parser);
 			if (r)
@@ -910,10 +908,21 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		parser->ring = ring;
 		parser->run_job = amdgpu_cs_parser_run_job;
 		parser->free_job = amdgpu_cs_parser_free_job;
-		amd_sched_push_job(ring->scheduler,
-				   &parser->ctx->rings[ring->idx].entity,
-				   parser);
-		cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
+		mutex_lock(&parser->job_lock);
+		r = amd_sched_push_job(ring->scheduler,
+				       &parser->ctx->rings[ring->idx].entity,
+				       parser,
+				       &parser->s_fence);
+		if (r) {
+			mutex_unlock(&parser->job_lock);
+			goto out;
+		}
+		parser->ibs[parser->num_ibs - 1].sequence =
+			amdgpu_ctx_add_fence(parser->ctx, ring,
+					     &parser->s_fence->base,
+					     parser->s_fence->v_seq);
+		cs->out.handle = parser->s_fence->v_seq;
+		mutex_unlock(&parser->job_lock);
 		up_read(&adev->exclusive_lock);
 		return 0;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 232e800eea56..1833f05c7e0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -268,16 +268,6 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	struct fence *fence;
 	uint64_t queued_seq;
-	int r;
-
-	if (amdgpu_enable_scheduler) {
-		r = amd_sched_wait_emit(&cring->entity,
-					seq,
-					false,
-					-1);
-		if (r)
-			return NULL;
-	}
 
 	spin_lock(&ctx->ring_lock);
 	if (amdgpu_enable_scheduler)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index eed409c59492..5104e64e9ad8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -218,7 +218,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 
 	sequence = amdgpu_enable_scheduler ? ib->sequence : 0;
 
-	if (ib->ctx)
+	if (!amdgpu_enable_scheduler && ib->ctx)
 		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
 						    &ib->fence->base,
 						    sequence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index d82f2481bd0e..6a7e83edcaa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -118,7 +118,6 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 {
 	int r = 0;
 	if (amdgpu_enable_scheduler) {
-		uint64_t v_seq;
 		struct amdgpu_cs_parser *sched_job =
 			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
 						ibs, num_ibs);
@@ -126,22 +125,23 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 			return -ENOMEM;
 		}
 		sched_job->free_job = free_job;
-		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
-		ibs[num_ibs - 1].sequence = v_seq;
-		amd_sched_push_job(ring->scheduler,
-				   &adev->kernel_ctx.rings[ring->idx].entity,
-				   sched_job);
-		r = amd_sched_wait_emit(
-			&adev->kernel_ctx.rings[ring->idx].entity,
-			v_seq,
-			false,
-			-1);
-		if (r)
-			WARN(true, "emit timeout\n");
-	} else
+		mutex_lock(&sched_job->job_lock);
+		r = amd_sched_push_job(ring->scheduler,
+				       &adev->kernel_ctx.rings[ring->idx].entity,
+				       sched_job, &sched_job->s_fence);
+		if (r) {
+			mutex_unlock(&sched_job->job_lock);
+			kfree(sched_job);
+			return r;
+		}
+		ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
+		*f = &sched_job->s_fence->base;
+		mutex_unlock(&sched_job->job_lock);
+	} else {
 		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
-	if (r)
-		return r;
-	*f = &ibs[num_ibs - 1].fence->base;
+		if (r)
+			return r;
+		*f = &ibs[num_ibs - 1].fence->base;
+	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 33b4f55e48b1..402086d96889 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -180,6 +180,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  uint32_t jobs)
 {
 	uint64_t seq_ring = 0;
+	char name[20];
 
 	if (!(sched && entity && rq))
 		return -EINVAL;
@@ -191,6 +192,10 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 	entity->scheduler = sched;
 	init_waitqueue_head(&entity->wait_queue);
 	init_waitqueue_head(&entity->wait_emit);
+	entity->fence_context = fence_context_alloc(1);
+	snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
+	memcpy(entity->name, name, 20);
+	INIT_LIST_HEAD(&entity->fence_list);
 	if(kfifo_alloc(&entity->job_queue,
 		       jobs * sizeof(void *),
 		       GFP_KERNEL))
@@ -199,6 +204,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 	spin_lock_init(&entity->queue_lock);
 	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
 	atomic64_set(&entity->last_queued_v_seq, seq_ring);
+	atomic64_set(&entity->last_signaled_v_seq, seq_ring);
 
 	/* Add the entity to the run queue */
 	mutex_lock(&rq->lock);
@@ -291,15 +297,25 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 */
 int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       struct amd_sched_entity *c_entity,
-		       void *data)
+		       void *data,
+		       struct amd_sched_fence **fence)
 {
-	struct amd_sched_job *job = kzalloc(sizeof(struct amd_sched_job),
-					    GFP_KERNEL);
+	struct amd_sched_job *job;
+
+	if (!fence)
+		return -EINVAL;
+	job = kzalloc(sizeof(struct amd_sched_job), GFP_KERNEL);
 	if (!job)
 		return -ENOMEM;
 	job->sched = sched;
 	job->s_entity = c_entity;
 	job->data = data;
+	*fence = amd_sched_fence_create(c_entity);
+	if ((*fence) == NULL) {
+		kfree(job);
+		return -EINVAL;
+	}
+	job->s_fence = *fence;
 	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
 				   &c_entity->queue_lock) != sizeof(void *)) {
 		/**
@@ -368,12 +384,16 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	unsigned long flags;
 
 	sched = sched_job->sched;
+	atomic64_set(&sched_job->s_entity->last_signaled_v_seq,
+		     sched_job->s_fence->v_seq);
+	amd_sched_fence_signal(sched_job->s_fence);
 	spin_lock_irqsave(&sched->queue_lock, flags);
 	list_del(&sched_job->list);
 	atomic64_dec(&sched->hw_rq_count);
 	spin_unlock_irqrestore(&sched->queue_lock, flags);
 
 	sched->ops->process_job(sched, sched_job);
+	fence_put(&sched_job->s_fence->base);
 	kfree(sched_job);
 	wake_up_interruptible(&sched->wait_queue);
 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index f54615d6a500..300132f14d74 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -45,6 +45,7 @@ struct amd_sched_entity {
 	/* the virtual_seq is unique per context per ring */
 	atomic64_t last_queued_v_seq;
 	atomic64_t last_emitted_v_seq;
+	atomic64_t last_signaled_v_seq;
 	/* the job_queue maintains the jobs submitted by clients */
 	struct kfifo job_queue;
 	spinlock_t queue_lock;
@@ -52,6 +53,9 @@ struct amd_sched_entity {
 	wait_queue_head_t wait_queue;
 	wait_queue_head_t wait_emit;
 	bool is_pending;
+	uint64_t fence_context;
+	struct list_head fence_list;
+	char name[20];
 };
 
 /**
@@ -72,14 +76,35 @@ struct amd_run_queue {
 	int (*check_entity_status)(struct amd_sched_entity *entity);
 };
 
+struct amd_sched_fence {
+	struct fence base;
+	struct fence_cb cb;
+	struct list_head list;
+	struct amd_sched_entity *entity;
+	uint64_t v_seq;
+	spinlock_t lock;
+};
+
 struct amd_sched_job {
 	struct list_head list;
 	struct fence_cb cb;
 	struct amd_gpu_scheduler *sched;
 	struct amd_sched_entity *s_entity;
 	void *data;
+	struct amd_sched_fence *s_fence;
 };
 
+extern const struct fence_ops amd_sched_fence_ops;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+{
+	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+
+	if (__f->base.ops == &amd_sched_fence_ops)
+		return __f;
+
+	return NULL;
+}
+
 /**
  * Define the backend operations called by the scheduler,
  * these functions should be implemented in driver side
@@ -126,7 +151,8 @@ int amd_sched_destroy(struct amd_gpu_scheduler *sched);
 
 int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       struct amd_sched_entity *c_entity,
-		       void *data);
+		       void *data,
+		       struct amd_sched_fence **fence);
 
 int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
 			uint64_t seq,
@@ -146,4 +172,9 @@ void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq);
 
 uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);
 
+struct amd_sched_fence *amd_sched_fence_create(
+	struct amd_sched_entity *s_entity);
+void amd_sched_fence_signal(struct amd_sched_fence *fence);
+
+
 #endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
new file mode 100644
index 000000000000..d580a357c547
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+static void amd_sched_fence_wait_cb(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_fence *fence =
+		container_of(cb, struct amd_sched_fence, cb);
+	list_del_init(&fence->list);
+	fence_put(&fence->base);
+}
+
+struct amd_sched_fence *amd_sched_fence_create(
+	struct amd_sched_entity *s_entity)
+{
+	struct amd_sched_fence *fence = NULL;
+	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+	if (fence == NULL)
+		return NULL;
+	fence->v_seq = atomic64_inc_return(&s_entity->last_queued_v_seq);
+	fence->entity = s_entity;
+	spin_lock_init(&fence->lock);
+	fence_init(&fence->base, &amd_sched_fence_ops,
+		   &fence->lock,
+		   s_entity->fence_context,
+		   fence->v_seq);
+	fence_get(&fence->base);
+	list_add_tail(&fence->list, &s_entity->fence_list);
+	if (fence_add_callback(&fence->base, &fence->cb,
+			       amd_sched_fence_wait_cb)) {
+		fence_put(&fence->base);
+		kfree(fence);
+		return NULL;
+	}
+	return fence;
+}
+
+bool amd_sched_check_ts(struct amd_sched_entity *s_entity, uint64_t v_seq)
+{
+	return atomic64_read(&s_entity->last_signaled_v_seq) >= v_seq ? true : false;
+}
+
+void amd_sched_fence_signal(struct amd_sched_fence *fence)
+{
+	if (amd_sched_check_ts(fence->entity, fence->v_seq)) {
+		int ret = fence_signal_locked(&fence->base);
+		if (!ret)
+			FENCE_TRACE(&fence->base, "signaled from irq context\n");
+		else
+			FENCE_TRACE(&fence->base, "was already signaled\n");
+	} else
+		WARN(true, "fence process mismatch with job!\n");
+}
+
+static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+{
+	return "amd_sched";
+}
+
+static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+	return (const char *)fence->entity->name;
+}
+
+static bool amd_sched_fence_enable_signaling(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+	return !amd_sched_check_ts(fence->entity, fence->v_seq);
+}
+
+static bool amd_sched_fence_is_signaled(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+	return amd_sched_check_ts(fence->entity, fence->v_seq);
+}
+
+const struct fence_ops amd_sched_fence_ops = {
+	.get_driver_name = amd_sched_fence_get_driver_name,
+	.get_timeline_name = amd_sched_fence_get_timeline_name,
+	.enable_signaling = amd_sched_fence_enable_signaling,
+	.signaled = amd_sched_fence_is_signaled,
+	.wait = fence_default_wait,
+	.release = NULL,
+};