author    Debarshi Dutta <ddutta@nvidia.com>    2018-09-04 08:09:36 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-09-18 02:39:24 -0400
commit    2517d59be282426eec7a97745b76d745ff36c388 (patch)
tree      faf915b5cfffb781918d674ec7d769feb7e98ac8 /drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
parent    8381eeea4f9b4717854387068ddf9244973e7d0d (diff)
gpu: nvgpu: move channel_sync_gk20a.* to common directory
1) Move channel_sync_gk20a.* from the gk20a/ directory to the common/ directory, as these files do not program any hardware registers. As an add-on, rename channel_sync_gk20a.* to channel_sync.* and update the headers in the required files.
2) Rename struct gk20a_channel_sync to struct nvgpu_channel_sync. The corresponding syncpt and semaphore versions of the struct, along with their related methods, are renamed by replacing "gk20a" with "nvgpu" in their names.
3) Add MISRA-C cleanups.

Jira NVGPU-1086

Change-Id: I4e0e21803ca3858dd7a5fc4d2454dba1f1bfcecd
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1812594
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
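To make the rename in point 2 concrete, the sketch below is illustrative only and is not taken from this patch: it assumes the renamed struct nvgpu_channel_sync keeps the same member function pointers that the deleted file below assigns through sp->ops / sema->ops, with signatures inferred from the static handlers in that file. The nvgpu_atomic_t type for refcount and the new header location under common/ are likewise assumptions, not quotes from the change.

/*
 * Hypothetical sketch of the renamed interface after the move to common/
 * as channel_sync.h; member names and signatures are inferred from the
 * gk20a_channel_sync ops set up in the deleted file below.
 */
struct nvgpu_channel_sync {
	nvgpu_atomic_t refcount;

	int (*wait_syncpt)(struct nvgpu_channel_sync *s, u32 id, u32 thresh,
			   struct priv_cmd_entry *entry);
	int (*wait_fd)(struct nvgpu_channel_sync *s, int fd,
		       struct priv_cmd_entry *entry, int max_wait_cmds);
	int (*incr)(struct nvgpu_channel_sync *s,
		    struct priv_cmd_entry *entry, struct gk20a_fence *fence,
		    bool need_sync_fence, bool register_irq);
	int (*incr_user)(struct nvgpu_channel_sync *s, int wait_fence_fd,
			 struct priv_cmd_entry *entry,
			 struct gk20a_fence *fence, bool wfi,
			 bool need_sync_fence, bool register_irq);
	void (*set_min_eq_max)(struct nvgpu_channel_sync *s);
	void (*set_safe_state)(struct nvgpu_channel_sync *s);
	int (*syncpt_id)(struct nvgpu_channel_sync *s);
	u64 (*syncpt_address)(struct nvgpu_channel_sync *s);
	void (*destroy)(struct nvgpu_channel_sync *s);
};

The authoritative definition is whatever this change introduces in the renamed common/ header; the sketch only shows how the old ops table maps onto the new name.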
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c  662
1 file changed, 0 insertions(+), 662 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
deleted file mode 100644
index d7399403..00000000
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ /dev/null
@@ -1,662 +0,0 @@
/*
 * GK20A Channel Synchronization Abstraction
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/semaphore.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/atomic.h>
#include <nvgpu/bug.h>
#include <nvgpu/list.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/os_fence.h>
#include <nvgpu/channel.h>

#include "channel_sync_gk20a.h"
#include "gk20a.h"
#include "fence_gk20a.h"
#include "mm_gk20a.h"

#ifdef CONFIG_TEGRA_GK20A_NVHOST

struct gk20a_channel_syncpt {
	struct gk20a_channel_sync ops;
	struct channel_gk20a *c;
	struct nvgpu_nvhost_dev *nvhost_dev;
	u32 id;
	struct nvgpu_mem syncpt_buf;
};

int gk20a_channel_gen_syncpt_wait_cmd(struct channel_gk20a *c,
	u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd,
	u32 wait_cmd_size, int pos, bool preallocated)
{
	int err = 0;
	bool is_expired = nvgpu_nvhost_syncpt_is_expired_ext(
		c->g->nvhost_dev, id, thresh);

	if (is_expired) {
		if (preallocated) {
			nvgpu_memset(c->g, wait_cmd->mem,
			(wait_cmd->off + pos * wait_cmd_size) * sizeof(u32),
				0, wait_cmd_size * sizeof(u32));
		}
	} else {
		if (!preallocated) {
			err = gk20a_channel_alloc_priv_cmdbuf(c,
				c->g->ops.fifo.get_syncpt_wait_cmd_size(), wait_cmd);
			if (err) {
				nvgpu_err(c->g, "not enough priv cmd buffer space");
				return err;
			}
		}
		nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
			id, c->vm->syncpt_ro_map_gpu_va);
		c->g->ops.fifo.add_syncpt_wait_cmd(c->g, wait_cmd,
			pos * wait_cmd_size, id, thresh,
			c->vm->syncpt_ro_map_gpu_va);
	}

	return 0;
}

static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
	u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd)
{
	struct gk20a_channel_syncpt *sp =
		container_of(s, struct gk20a_channel_syncpt, ops);
	struct channel_gk20a *c = sp->c;
	int err = 0;
	u32 wait_cmd_size = c->g->ops.fifo.get_syncpt_wait_cmd_size();

	if (!nvgpu_nvhost_syncpt_is_valid_pt_ext(sp->nvhost_dev, id))
		return -EINVAL;

	err = gk20a_channel_gen_syncpt_wait_cmd(c, id, thresh,
		wait_cmd, wait_cmd_size, 0, false);

	return err;
}

static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
	struct priv_cmd_entry *wait_cmd, int max_wait_cmds)
{
	struct nvgpu_os_fence os_fence = {0};
	struct gk20a_channel_syncpt *sp =
		container_of(s, struct gk20a_channel_syncpt, ops);
	struct channel_gk20a *c = sp->c;
	int err = 0;

	err = nvgpu_os_fence_fdget(&os_fence, c, fd);
	if (err)
		return -EINVAL;

	err = os_fence.ops->program_waits(&os_fence,
		wait_cmd, c, max_wait_cmds);

	os_fence.ops->drop_ref(&os_fence);

	return err;
}

static void gk20a_channel_syncpt_update(void *priv, int nr_completed)
{
	struct channel_gk20a *ch = priv;

	gk20a_channel_update(ch);

	/* note: channel_get() is in __gk20a_channel_syncpt_incr() */
	gk20a_channel_put(ch);
}

static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
				bool wfi_cmd,
				bool register_irq,
				struct priv_cmd_entry *incr_cmd,
				struct gk20a_fence *fence,
				bool need_sync_fence)
{
	u32 thresh;
	int err;
	struct gk20a_channel_syncpt *sp =
		container_of(s, struct gk20a_channel_syncpt, ops);
	struct channel_gk20a *c = sp->c;
	struct nvgpu_os_fence os_fence = {0};

	err = gk20a_channel_alloc_priv_cmdbuf(c,
			c->g->ops.fifo.get_syncpt_incr_cmd_size(wfi_cmd),
			incr_cmd);
	if (err)
		return err;

	nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
		sp->id, sp->syncpt_buf.gpu_va);
	c->g->ops.fifo.add_syncpt_incr_cmd(c->g, wfi_cmd,
		incr_cmd, sp->id, sp->syncpt_buf.gpu_va);

	thresh = nvgpu_nvhost_syncpt_incr_max_ext(sp->nvhost_dev, sp->id,
		c->g->ops.fifo.get_syncpt_incr_per_release());

	if (register_irq) {
		struct channel_gk20a *referenced = gk20a_channel_get(c);

		WARN_ON(!referenced);

		if (referenced) {
			/* note: channel_put() is in
			 * gk20a_channel_syncpt_update() */

			err = nvgpu_nvhost_intr_register_notifier(
				sp->nvhost_dev,
				sp->id, thresh,
				gk20a_channel_syncpt_update, c);
			if (err)
				gk20a_channel_put(referenced);

			/* Adding interrupt action should
			 * never fail. A proper error handling
			 * here would require us to decrement
			 * the syncpt max back to its original
			 * value. */
			WARN(err,
				"failed to set submit complete interrupt");
		}
	}

	if (need_sync_fence) {
		err = nvgpu_os_fence_syncpt_create(&os_fence, c, sp->nvhost_dev,
			sp->id, thresh);

		if (err)
			goto clean_up_priv_cmd;
	}

	err = gk20a_fence_from_syncpt(fence, sp->nvhost_dev,
		sp->id, thresh, os_fence);

	if (err) {
		if (nvgpu_os_fence_is_initialized(&os_fence))
			os_fence.ops->drop_ref(&os_fence);
		goto clean_up_priv_cmd;
	}

	return 0;

clean_up_priv_cmd:
	gk20a_free_priv_cmdbuf(c, incr_cmd);
	return err;
}

static int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
			struct priv_cmd_entry *entry,
			struct gk20a_fence *fence,
			bool need_sync_fence,
			bool register_irq)
{
	/* Don't put wfi cmd to this one since we're not returning
	 * a fence to user space. */
	return __gk20a_channel_syncpt_incr(s,
			false /* no wfi */,
			register_irq /* register irq */,
			entry, fence, need_sync_fence);
}

static int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
			int wait_fence_fd,
			struct priv_cmd_entry *entry,
			struct gk20a_fence *fence,
			bool wfi,
			bool need_sync_fence,
			bool register_irq)
{
	/* Need to do 'wfi + host incr' since we return the fence
	 * to user space. */
	return __gk20a_channel_syncpt_incr(s,
			wfi,
			register_irq /* register irq */,
			entry, fence, need_sync_fence);
}

static void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
{
	struct gk20a_channel_syncpt *sp =
		container_of(s, struct gk20a_channel_syncpt, ops);
	nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);
}

static void gk20a_channel_syncpt_set_safe_state(struct gk20a_channel_sync *s)
{
	struct gk20a_channel_syncpt *sp =
		container_of(s, struct gk20a_channel_syncpt, ops);
	nvgpu_nvhost_syncpt_set_safe_state(sp->nvhost_dev, sp->id);
}

static int gk20a_channel_syncpt_id(struct gk20a_channel_sync *s)
{
	struct gk20a_channel_syncpt *sp =
		container_of(s, struct gk20a_channel_syncpt, ops);
	return sp->id;
}

static u64 gk20a_channel_syncpt_address(struct gk20a_channel_sync *s)
{
	struct gk20a_channel_syncpt *sp =
		container_of(s, struct gk20a_channel_syncpt, ops);
	return sp->syncpt_buf.gpu_va;
}

static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
{
	struct gk20a_channel_syncpt *sp =
		container_of(s, struct gk20a_channel_syncpt, ops);


	sp->c->g->ops.fifo.free_syncpt_buf(sp->c, &sp->syncpt_buf);

	nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);
	nvgpu_nvhost_syncpt_put_ref_ext(sp->nvhost_dev, sp->id);
	nvgpu_kfree(sp->c->g, sp);
}

static struct gk20a_channel_sync *
gk20a_channel_syncpt_create(struct channel_gk20a *c, bool user_managed)
{
	struct gk20a_channel_syncpt *sp;
	char syncpt_name[32];

	sp = nvgpu_kzalloc(c->g, sizeof(*sp));
	if (!sp)
		return NULL;

	sp->c = c;
	sp->nvhost_dev = c->g->nvhost_dev;

	if (user_managed) {
		snprintf(syncpt_name, sizeof(syncpt_name),
			"%s_%d_user", c->g->name, c->chid);

		sp->id = nvgpu_nvhost_get_syncpt_client_managed(sp->nvhost_dev,
				syncpt_name);
	} else {
		snprintf(syncpt_name, sizeof(syncpt_name),
			"%s_%d", c->g->name, c->chid);

		sp->id = nvgpu_nvhost_get_syncpt_host_managed(sp->nvhost_dev,
				c->chid, syncpt_name);
	}
	if (!sp->id) {
		nvgpu_kfree(c->g, sp);
		nvgpu_err(c->g, "failed to get free syncpt");
		return NULL;
	}

	sp->c->g->ops.fifo.alloc_syncpt_buf(sp->c, sp->id,
			&sp->syncpt_buf);

	nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);

	nvgpu_atomic_set(&sp->ops.refcount, 0);
	sp->ops.wait_syncpt = gk20a_channel_syncpt_wait_syncpt;
	sp->ops.wait_fd = gk20a_channel_syncpt_wait_fd;
	sp->ops.incr = gk20a_channel_syncpt_incr;
	sp->ops.incr_user = gk20a_channel_syncpt_incr_user;
	sp->ops.set_min_eq_max = gk20a_channel_syncpt_set_min_eq_max;
	sp->ops.set_safe_state = gk20a_channel_syncpt_set_safe_state;
	sp->ops.syncpt_id = gk20a_channel_syncpt_id;
	sp->ops.syncpt_address = gk20a_channel_syncpt_address;
	sp->ops.destroy = gk20a_channel_syncpt_destroy;

	return &sp->ops;
}
#endif /* CONFIG_TEGRA_GK20A_NVHOST */

struct gk20a_channel_semaphore {
	struct gk20a_channel_sync ops;
	struct channel_gk20a *c;

	/* A semaphore pool owned by this channel. */
	struct nvgpu_semaphore_pool *pool;
};

static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
		struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd,
		u32 offset, bool acquire, bool wfi)
{
	int ch = c->chid;
	u32 ob, off = cmd->off + offset;
	u64 va;

	ob = off;

	/*
	 * RO for acquire (since we just need to read the mem) and RW for
	 * release since we will need to write back to the semaphore memory.
	 */
	va = acquire ? nvgpu_semaphore_gpu_ro_va(s) :
			nvgpu_semaphore_gpu_rw_va(s);

	/*
	 * If the op is not an acquire (so therefor a release) we should
	 * incr the underlying sema next_value.
	 */
	if (!acquire) {
		nvgpu_semaphore_prepare(s, c->hw_sema);
	}

	g->ops.fifo.add_sema_cmd(g, s, va, cmd, off, acquire, wfi);

	if (acquire) {
		gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u pool=%-3llu"
			"va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
			ch, nvgpu_semaphore_get_value(s),
			s->location.pool->page_idx, va, cmd->gva,
			cmd->mem->gpu_va, ob);
	} else {
		gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) pool=%-3llu"
			"va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
			ch, nvgpu_semaphore_get_value(s),
			nvgpu_semaphore_read(s),
			s->location.pool->page_idx,
			va, cmd->gva, cmd->mem->gpu_va, ob);
	}
}

void gk20a_channel_gen_sema_wait_cmd(struct channel_gk20a *c,
	struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
	u32 wait_cmd_size, int pos)
{
	if (!sema) {
		/* expired */
		nvgpu_memset(c->g, wait_cmd->mem,
			(wait_cmd->off + pos * wait_cmd_size) * sizeof(u32),
			0, wait_cmd_size * sizeof(u32));
	} else {
		WARN_ON(!sema->incremented);
		add_sema_cmd(c->g, c, sema, wait_cmd,
			pos * wait_cmd_size, true, false);
		nvgpu_semaphore_put(sema);
	}
}

static int gk20a_channel_semaphore_wait_syncpt(
		struct gk20a_channel_sync *s, u32 id,
		u32 thresh, struct priv_cmd_entry *entry)
{
	struct gk20a_channel_semaphore *sema =
		container_of(s, struct gk20a_channel_semaphore, ops);
	struct gk20a *g = sema->c->g;
	nvgpu_err(g, "trying to use syncpoint synchronization");
	return -ENODEV;
}

static int gk20a_channel_semaphore_wait_fd(
		struct gk20a_channel_sync *s, int fd,
		struct priv_cmd_entry *entry, int max_wait_cmds)
{
	struct gk20a_channel_semaphore *sema =
		container_of(s, struct gk20a_channel_semaphore, ops);
	struct channel_gk20a *c = sema->c;

	struct nvgpu_os_fence os_fence = {0};
	int err;

	err = nvgpu_os_fence_fdget(&os_fence, c, fd);
	if (err) {
		return err;
	}

	err = os_fence.ops->program_waits(&os_fence,
		entry, c, max_wait_cmds);

	os_fence.ops->drop_ref(&os_fence);

	return err;
}

static int __gk20a_channel_semaphore_incr(
		struct gk20a_channel_sync *s, bool wfi_cmd,
		struct priv_cmd_entry *incr_cmd,
		struct gk20a_fence *fence,
		bool need_sync_fence)
{
	int incr_cmd_size;
	struct gk20a_channel_semaphore *sp =
		container_of(s, struct gk20a_channel_semaphore, ops);
	struct channel_gk20a *c = sp->c;
	struct nvgpu_semaphore *semaphore;
	int err = 0;
	struct nvgpu_os_fence os_fence = {0};

	semaphore = nvgpu_semaphore_alloc(c);
	if (!semaphore) {
		nvgpu_err(c->g,
				"ran out of semaphores");
		return -ENOMEM;
	}

	incr_cmd_size = c->g->ops.fifo.get_sema_incr_cmd_size();
	err = gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, incr_cmd);
	if (err) {
		nvgpu_err(c->g,
				"not enough priv cmd buffer space");
		goto clean_up_sema;
	}

	/* Release the completion semaphore. */
	add_sema_cmd(c->g, c, semaphore, incr_cmd, 0, false, wfi_cmd);

	if (need_sync_fence) {
		err = nvgpu_os_fence_sema_create(&os_fence, c,
			semaphore);

		if (err) {
			goto clean_up_sema;
		}
	}

	err = gk20a_fence_from_semaphore(fence,
		semaphore,
		&c->semaphore_wq,
		os_fence);

	if (err) {
		if (nvgpu_os_fence_is_initialized(&os_fence)) {
			os_fence.ops->drop_ref(&os_fence);
		}
		goto clean_up_sema;
	}

	return 0;

clean_up_sema:
	nvgpu_semaphore_put(semaphore);
	return err;
}

static int gk20a_channel_semaphore_incr(
		struct gk20a_channel_sync *s,
		struct priv_cmd_entry *entry,
		struct gk20a_fence *fence,
		bool need_sync_fence,
		bool register_irq)
{
	/* Don't put wfi cmd to this one since we're not returning
	 * a fence to user space. */
	return __gk20a_channel_semaphore_incr(s,
			false /* no wfi */,
			entry, fence, need_sync_fence);
}

static int gk20a_channel_semaphore_incr_user(
		struct gk20a_channel_sync *s,
		int wait_fence_fd,
		struct priv_cmd_entry *entry,
		struct gk20a_fence *fence,
		bool wfi,
		bool need_sync_fence,
		bool register_irq)
{
#ifdef CONFIG_SYNC
	int err;

	err = __gk20a_channel_semaphore_incr(s, wfi, entry, fence,
			need_sync_fence);
	if (err)
		return err;

	return 0;
#else
	struct gk20a_channel_semaphore *sema =
		container_of(s, struct gk20a_channel_semaphore, ops);
	nvgpu_err(sema->c->g,
		"trying to use sync fds with CONFIG_SYNC disabled");
	return -ENODEV;
#endif
}

static void gk20a_channel_semaphore_set_min_eq_max(struct gk20a_channel_sync *s)
{
	struct gk20a_channel_semaphore *sp =
		container_of(s, struct gk20a_channel_semaphore, ops);
	struct channel_gk20a *c = sp->c;
	bool updated;

	if (!c->hw_sema) {
		return;
	}

	updated = nvgpu_semaphore_reset(c->hw_sema);

	if (updated) {
		nvgpu_cond_broadcast_interruptible(&c->semaphore_wq);
	}
}

static void gk20a_channel_semaphore_set_safe_state(struct gk20a_channel_sync *s)
{
	/* Nothing to do. */
}

static int gk20a_channel_semaphore_syncpt_id(struct gk20a_channel_sync *s)
{
	return -EINVAL;
}

static u64 gk20a_channel_semaphore_syncpt_address(struct gk20a_channel_sync *s)
{
	return 0;
}

static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
{
	struct gk20a_channel_semaphore *sema =
		container_of(s, struct gk20a_channel_semaphore, ops);

	struct channel_gk20a *c = sema->c;
	struct gk20a *g = c->g;

	if (c->has_os_fence_framework_support &&
		g->os_channel.os_fence_framework_inst_exists(c)) {
			g->os_channel.destroy_os_fence_framework(c);
	}

	/* The sema pool is cleaned up by the VM destroy. */
	sema->pool = NULL;

	nvgpu_kfree(sema->c->g, sema);
}

static struct gk20a_channel_sync *
gk20a_channel_semaphore_create(struct channel_gk20a *c, bool user_managed)
{
	struct gk20a_channel_semaphore *sema;
	struct gk20a *g = c->g;
	char pool_name[20];
	int asid = -1;
	int err;

	if (WARN_ON(!c->vm)) {
		return NULL;
	}

	sema = nvgpu_kzalloc(c->g, sizeof(*sema));
	if (!sema) {
		return NULL;
	}
	sema->c = c;

	sprintf(pool_name, "semaphore_pool-%d", c->chid);
	sema->pool = c->vm->sema_pool;

	if (c->vm->as_share) {
		asid = c->vm->as_share->id;
	}

	if (c->has_os_fence_framework_support) {
		/*Init the sync_timeline for this channel */
		err = g->os_channel.init_os_fence_framework(c,
			"gk20a_ch%d_as%d", c->chid, asid);

		if (err) {
			nvgpu_kfree(g, sema);
			return NULL;
		}
	}

	nvgpu_atomic_set(&sema->ops.refcount, 0);
	sema->ops.wait_syncpt = gk20a_channel_semaphore_wait_syncpt;
	sema->ops.wait_fd = gk20a_channel_semaphore_wait_fd;
	sema->ops.incr = gk20a_channel_semaphore_incr;
	sema->ops.incr_user = gk20a_channel_semaphore_incr_user;
	sema->ops.set_min_eq_max = gk20a_channel_semaphore_set_min_eq_max;
	sema->ops.set_safe_state = gk20a_channel_semaphore_set_safe_state;
	sema->ops.syncpt_id = gk20a_channel_semaphore_syncpt_id;
	sema->ops.syncpt_address = gk20a_channel_semaphore_syncpt_address;
	sema->ops.destroy = gk20a_channel_semaphore_destroy;

	return &sema->ops;
}

void gk20a_channel_sync_destroy(struct gk20a_channel_sync *sync,
	bool set_safe_state)
{
	if (set_safe_state) {
		sync->set_safe_state(sync);
	}
	sync->destroy(sync);
}

struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c,
	bool user_managed)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	if (gk20a_platform_has_syncpoints(c->g))
		return gk20a_channel_syncpt_create(c, user_managed);
#endif
	return gk20a_channel_semaphore_create(c, user_managed);
}

bool gk20a_channel_sync_needs_sync_framework(struct gk20a *g)
{
	return !gk20a_platform_has_syncpoints(g);
}