Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c  233
1 file changed, 59 insertions(+), 174 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 0dcc5abb..4d366fa9 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -19,6 +19,7 @@
 
 #include "channel_sync_gk20a.h"
 #include "gk20a.h"
+#include "fence_gk20a.h"
 #include "semaphore_gk20a.h"
 #include "sync_gk20a.h"
 #include "mm_gk20a.h"
@@ -52,33 +53,9 @@ static void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
 	ptr[3] = (id << 8) | 0x10;
 }
 
-int gk20a_channel_syncpt_wait_cpu(struct gk20a_channel_sync *s,
-		struct gk20a_channel_fence *fence,
-		int timeout)
-{
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
-	if (!fence->valid)
-		return 0;
-	return nvhost_syncpt_wait_timeout_ext(
-			sp->host1x_pdev, sp->id, fence->thresh,
-			timeout, NULL, NULL);
-}
-
-bool gk20a_channel_syncpt_is_expired(struct gk20a_channel_sync *s,
-		struct gk20a_channel_fence *fence)
-{
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
-	if (!fence->valid)
-		return true;
-	return nvhost_syncpt_is_expired_ext(sp->host1x_pdev, sp->id,
-			fence->thresh);
-}
-
 int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, u32 id,
 		u32 thresh, struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence)
+		struct gk20a_fence **fence)
 {
 	struct gk20a_channel_syncpt *sp =
 		container_of(s, struct gk20a_channel_syncpt, ops);
@@ -103,13 +80,13 @@ int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, u32 id,
 	add_wait_cmd(&wait_cmd->ptr[0], id, thresh);
 
 	*entry = wait_cmd;
-	fence->valid = false;
+	*fence = NULL;
 	return 0;
 }
 
 int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence)
+		struct gk20a_fence **fence)
 {
 #ifdef CONFIG_SYNC
 	int i;
@@ -164,7 +141,7 @@ int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 	sync_fence_put(sync_fence);
 
 	*entry = wait_cmd;
-	fence->valid = false;
+	*fence = NULL;
 	return 0;
 #else
 	return -ENODEV;
@@ -181,7 +158,7 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 		bool gfx_class, bool wfi_cmd,
 		bool register_irq,
 		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence)
+		struct gk20a_fence **fence)
 {
 	u32 thresh;
 	int incr_cmd_size;
@@ -253,16 +230,15 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 		WARN(err, "failed to set submit complete interrupt");
 	}
 
-	fence->thresh = thresh;
-	fence->valid = true;
-	fence->wfi = wfi_cmd;
+	*fence = gk20a_fence_from_syncpt(sp->host1x_pdev, sp->id, thresh,
+			wfi_cmd);
 	*entry = incr_cmd;
 	return 0;
 }
 
 int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
 		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence)
+		struct gk20a_fence **fence)
 {
 	return __gk20a_channel_syncpt_incr(s,
 			false /* use host class */,
@@ -273,7 +249,7 @@ int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
 
 int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence)
+		struct gk20a_fence **fence)
 {
 	struct gk20a_channel_syncpt *sp =
 		container_of(s, struct gk20a_channel_syncpt, ops);
@@ -286,58 +262,36 @@ int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 			entry, fence);
 }
 
-int gk20a_channel_syncpt_incr_user_syncpt(struct gk20a_channel_sync *s,
-		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence,
-		bool wfi,
-		u32 *id, u32 *thresh)
+int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
+		int wait_fence_fd,
+		struct priv_cmd_entry **entry,
+		struct gk20a_fence **fence,
+		bool wfi)
 {
 	struct gk20a_channel_syncpt *sp =
 		container_of(s, struct gk20a_channel_syncpt, ops);
 	/* Need to do 'host incr + wfi' or 'gfx incr' since we return the fence
 	 * to user space. */
-	int err = __gk20a_channel_syncpt_incr(s,
+	return __gk20a_channel_syncpt_incr(s,
 			wfi &&
 			sp->c->obj_class == KEPLER_C /* use gfx class? */,
 			wfi &&
 			sp->c->obj_class != KEPLER_C /* wfi if host class */,
 			true /* register irq */,
 			entry, fence);
-	if (err)
-		return err;
-	*id = sp->id;
-	*thresh = fence->thresh;
-	return 0;
 }
 
-int gk20a_channel_syncpt_incr_user_fd(struct gk20a_channel_sync *s,
-		int wait_fence_fd,
-		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence,
-		bool wfi,
-		int *fd)
+void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
 {
-#ifdef CONFIG_SYNC
-	int err;
-	struct nvhost_ctrl_sync_fence_info pt;
 	struct gk20a_channel_syncpt *sp =
 		container_of(s, struct gk20a_channel_syncpt, ops);
-	err = gk20a_channel_syncpt_incr_user_syncpt(s, entry, fence, wfi,
-			&pt.id, &pt.thresh);
-	if (err)
-		return err;
-	return nvhost_sync_create_fence_fd(sp->host1x_pdev, &pt, 1,
-			"fence", fd);
-#else
-	return -ENODEV;
-#endif
+	nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
 }
 
-void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
+static void gk20a_channel_syncpt_signal_timeline(
+		struct gk20a_channel_sync *s)
 {
-	struct gk20a_channel_syncpt *sp =
-		container_of(s, struct gk20a_channel_syncpt, ops);
-	nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
+	/* Nothing to do. */
 }
 
 static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
@@ -366,15 +320,13 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
 		return NULL;
 	}
 
-	sp->ops.wait_cpu = gk20a_channel_syncpt_wait_cpu;
-	sp->ops.is_expired = gk20a_channel_syncpt_is_expired;
 	sp->ops.wait_syncpt = gk20a_channel_syncpt_wait_syncpt;
 	sp->ops.wait_fd = gk20a_channel_syncpt_wait_fd;
 	sp->ops.incr = gk20a_channel_syncpt_incr;
 	sp->ops.incr_wfi = gk20a_channel_syncpt_incr_wfi;
-	sp->ops.incr_user_syncpt = gk20a_channel_syncpt_incr_user_syncpt;
-	sp->ops.incr_user_fd = gk20a_channel_syncpt_incr_user_fd;
+	sp->ops.incr_user = gk20a_channel_syncpt_incr_user;
 	sp->ops.set_min_eq_max = gk20a_channel_syncpt_set_min_eq_max;
+	sp->ops.signal_timeline = gk20a_channel_syncpt_signal_timeline;
 	sp->ops.destroy = gk20a_channel_syncpt_destroy;
 
 	sp->ops.aggressive_destroy = true;
@@ -460,48 +412,10 @@ static int add_sema_cmd(u32 *ptr, u64 sema, u32 payload,
 	return i;
 }
 
-static int gk20a_channel_semaphore_wait_cpu(
-		struct gk20a_channel_sync *s,
-		struct gk20a_channel_fence *fence,
-		int timeout)
-{
-	int remain;
-	struct gk20a_channel_semaphore *sp =
-		container_of(s, struct gk20a_channel_semaphore, ops);
-	if (!fence->valid || WARN_ON(!fence->semaphore))
-		return 0;
-
-	remain = wait_event_interruptible_timeout(
-			sp->c->semaphore_wq,
-			!gk20a_semaphore_is_acquired(fence->semaphore),
-			timeout);
-	if (remain == 0 && gk20a_semaphore_is_acquired(fence->semaphore))
-		return -ETIMEDOUT;
-	else if (remain < 0)
-		return remain;
-	return 0;
-}
-
-static bool gk20a_channel_semaphore_is_expired(
-		struct gk20a_channel_sync *s,
-		struct gk20a_channel_fence *fence)
-{
-	bool expired;
-	struct gk20a_channel_semaphore *sp =
-		container_of(s, struct gk20a_channel_semaphore, ops);
-	if (!fence->valid || WARN_ON(!fence->semaphore))
-		return true;
-
-	expired = !gk20a_semaphore_is_acquired(fence->semaphore);
-	if (expired)
-		gk20a_sync_timeline_signal(sp->timeline);
-	return expired;
-}
-
 static int gk20a_channel_semaphore_wait_syncpt(
 		struct gk20a_channel_sync *s, u32 id,
 		u32 thresh, struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence)
+		struct gk20a_fence **fence)
 {
 	struct gk20a_channel_semaphore *sema =
 		container_of(s, struct gk20a_channel_semaphore, ops);
@@ -513,7 +427,7 @@ static int gk20a_channel_semaphore_wait_syncpt(
 static int gk20a_channel_semaphore_wait_fd(
 		struct gk20a_channel_sync *s, int fd,
 		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence)
+		struct gk20a_fence **fence)
 {
 	struct gk20a_channel_semaphore *sema =
 		container_of(s, struct gk20a_channel_semaphore, ops);
@@ -558,6 +472,11 @@ static int gk20a_channel_semaphore_wait_fd(
 	WARN_ON(written != wait_cmd->size);
 	sync_fence_wait_async(sync_fence, &w->waiter);
 
+	/* XXX - this fixes an actual bug, we need to hold a ref to this
+	   semaphore while the job is in flight. */
+	*fence = gk20a_fence_from_semaphore(sema->timeline, w->sema,
+			&c->semaphore_wq,
+			NULL, false);
 	*entry = wait_cmd;
 	return 0;
 fail:
@@ -575,8 +494,9 @@ fail:
 
 static int __gk20a_channel_semaphore_incr(
 		struct gk20a_channel_sync *s, bool wfi_cmd,
+		struct sync_fence *dependency,
 		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence)
+		struct gk20a_fence **fence)
 {
 	u64 va;
 	int incr_cmd_size;
@@ -608,9 +528,9 @@ static int __gk20a_channel_semaphore_incr(
 	written = add_sema_cmd(incr_cmd->ptr, va, 1, false, wfi_cmd);
 	WARN_ON(written != incr_cmd_size);
 
-	fence->valid = true;
-	fence->wfi = wfi_cmd;
-	fence->semaphore = semaphore;
+	*fence = gk20a_fence_from_semaphore(sp->timeline, semaphore,
+			&c->semaphore_wq,
+			dependency, wfi_cmd);
 	*entry = incr_cmd;
 	return 0;
 }
@@ -618,72 +538,54 @@ static int __gk20a_channel_semaphore_incr(
 static int gk20a_channel_semaphore_incr_wfi(
 		struct gk20a_channel_sync *s,
 		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence)
+		struct gk20a_fence **fence)
 {
 	return __gk20a_channel_semaphore_incr(s,
 			true /* wfi */,
+			NULL,
 			entry, fence);
 }
 
 static int gk20a_channel_semaphore_incr(
 		struct gk20a_channel_sync *s,
 		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence)
+		struct gk20a_fence **fence)
 {
 	/* Don't put wfi cmd to this one since we're not returning
 	 * a fence to user space. */
 	return __gk20a_channel_semaphore_incr(s, false /* no wfi */,
-			entry, fence);
-}
-
-static int gk20a_channel_semaphore_incr_user_syncpt(
-		struct gk20a_channel_sync *s,
-		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence,
-		bool wfi,
-		u32 *id, u32 *thresh)
-{
-	struct gk20a_channel_semaphore *sema =
-		container_of(s, struct gk20a_channel_semaphore, ops);
-	struct device *dev = dev_from_gk20a(sema->c->g);
-	gk20a_err(dev, "trying to use syncpoint synchronization");
-	return -ENODEV;
+			NULL, entry, fence);
 }
 
-static int gk20a_channel_semaphore_incr_user_fd(
+static int gk20a_channel_semaphore_incr_user(
 		struct gk20a_channel_sync *s,
 		int wait_fence_fd,
 		struct priv_cmd_entry **entry,
-		struct gk20a_channel_fence *fence,
-		bool wfi,
-		int *fd)
+		struct gk20a_fence **fence,
+		bool wfi)
 {
-	struct gk20a_channel_semaphore *sema =
-		container_of(s, struct gk20a_channel_semaphore, ops);
 #ifdef CONFIG_SYNC
 	struct sync_fence *dependency = NULL;
 	int err;
 
-	err = __gk20a_channel_semaphore_incr(s, wfi,
-			entry, fence);
-	if (err)
-		return err;
-
 	if (wait_fence_fd >= 0) {
 		dependency = gk20a_sync_fence_fdget(wait_fence_fd);
 		if (!dependency)
 			return -EINVAL;
 	}
 
-	*fd = gk20a_sync_fence_create(sema->timeline, fence->semaphore,
-			dependency, "fence");
-	if (*fd < 0) {
+	err = __gk20a_channel_semaphore_incr(s, wfi, dependency,
+			entry, fence);
+	if (err) {
 		if (dependency)
 			sync_fence_put(dependency);
-		return *fd;
+		return err;
 	}
+
 	return 0;
 #else
+	struct gk20a_channel_semaphore *sema =
+		container_of(s, struct gk20a_channel_semaphore, ops);
 	gk20a_err(dev_from_gk20a(sema->c->g),
 		"trying to use sync fds with CONFIG_SYNC disabled");
 	return -ENODEV;
@@ -695,6 +597,14 @@ static void gk20a_channel_semaphore_set_min_eq_max(struct gk20a_channel_sync *s)
 	/* Nothing to do. */
 }
 
+static void gk20a_channel_semaphore_signal_timeline(
+		struct gk20a_channel_sync *s)
+{
+	struct gk20a_channel_semaphore *sp =
+		container_of(s, struct gk20a_channel_semaphore, ops);
+	gk20a_sync_timeline_signal(sp->timeline);
+}
+
 static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
 {
 	struct gk20a_channel_semaphore *sema =
@@ -746,15 +656,13 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c)
 	if (!sema->timeline)
 		goto clean_up;
 #endif
-	sema->ops.wait_cpu = gk20a_channel_semaphore_wait_cpu;
-	sema->ops.is_expired = gk20a_channel_semaphore_is_expired;
 	sema->ops.wait_syncpt = gk20a_channel_semaphore_wait_syncpt;
 	sema->ops.wait_fd = gk20a_channel_semaphore_wait_fd;
 	sema->ops.incr = gk20a_channel_semaphore_incr;
 	sema->ops.incr_wfi = gk20a_channel_semaphore_incr_wfi;
-	sema->ops.incr_user_syncpt = gk20a_channel_semaphore_incr_user_syncpt;
-	sema->ops.incr_user_fd = gk20a_channel_semaphore_incr_user_fd;
+	sema->ops.incr_user = gk20a_channel_semaphore_incr_user;
 	sema->ops.set_min_eq_max = gk20a_channel_semaphore_set_min_eq_max;
+	sema->ops.signal_timeline = gk20a_channel_semaphore_signal_timeline;
 	sema->ops.destroy = gk20a_channel_semaphore_destroy;
 
 	/* Aggressively destroying the semaphore sync would cause overhead
@@ -775,26 +683,3 @@ struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c)
 #endif
 	return gk20a_channel_semaphore_create(c);
 }
-
-static inline bool gk20a_channel_fence_is_closed(struct gk20a_channel_fence *f)
-{
-	if (f->valid || f->semaphore)
-		return false;
-	return true;
-}
-
-void gk20a_channel_fence_close(struct gk20a_channel_fence *f)
-{
-	if (f->semaphore)
-		gk20a_semaphore_put(f->semaphore);
-	memset(f, 0, sizeof(*f));
-}
-
-void gk20a_channel_fence_dup(struct gk20a_channel_fence *from,
-		struct gk20a_channel_fence *to)
-{
-	WARN_ON(!gk20a_channel_fence_is_closed(to));
-	*to = *from;
-	if (to->semaphore)
-		gk20a_semaphore_get(to->semaphore);
-}