summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDebarshi Dutta <ddutta@nvidia.com>2018-03-23 06:02:27 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-04-23 00:04:48 -0400
commitd0e4dfd6efd651abc431aba9cfae5907638f8172 (patch)
tree1a412eaa4636ff2e2862c1623bad3a5ea6883d4e
parentc918c42a4a3651f757c6966aead4b07eb4b56697 (diff)
gpu: nvgpu: sync_framework cleanups
This patch deals with cleanups meant to make things simpler for the upcoming os abstraction patches for the sync framework. This patch causes some substantial changes which are listed out as follows. 1) sync_timeline is moved out of gk20a_fence into struct nvgpu_channel_linux. New function pointers are created to facilitate os independent methods for enabling/disabling timeline and are now named as os_fence_framework. These function pointers are located in the struct os_channel under struct gk20a. 2) construction of the channel_sync requires nvgpu_finalize_poweron_linux() to be invoked before invocations to nvgpu_init_mm_ce_context(). Hence, these methods are now moved away from gk20a_finalize_poweron() and invoked after nvgpu_finalize_poweron_linux(). 3) sync_fence creation is now delinked from fence construction and moved to the channel_sync_gk20a's channel_incr methods. These sync_fences are mainly associated with post_fences. 4) In case userspace requires the sync_fences to be constructed, we try to obtain an fd before the gk20a_channel_submit_gpfifo() instead of trying to do that later. This is used to avoid potential after-effects of duplicate work submission due to failure to obtain an unused fd. JIRA NVGPU-66 Change-Id: I42a3e4e2e692a113b1b36d2b48ab107ae4444dfa Signed-off-by: Debarshi Dutta <ddutta@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1678400 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--drivers/gpu/nvgpu/common/linux/channel.c79
-rw-r--r--drivers/gpu/nvgpu/common/linux/channel.h15
-rw-r--r--drivers/gpu/nvgpu/common/linux/ioctl_channel.c21
-rw-r--r--drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c21
-rw-r--r--drivers/gpu/nvgpu/common/linux/module.c16
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.c4
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_gk20a.h2
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c102
-rw-r--r--drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h7
-rw-r--r--drivers/gpu/nvgpu/gk20a/fence_gk20a.c101
-rw-r--r--drivers/gpu/nvgpu/gk20a/fence_gk20a.h16
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.c4
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.h5
-rw-r--r--drivers/gpu/nvgpu/gk20a/sync_gk20a.c22
-rw-r--r--drivers/gpu/nvgpu/gk20a/sync_gk20a.h9
15 files changed, 275 insertions, 149 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/channel.c b/drivers/gpu/nvgpu/common/linux/channel.c
index a360d0df..8f2adc3a 100644
--- a/drivers/gpu/nvgpu/common/linux/channel.c
+++ b/drivers/gpu/nvgpu/common/linux/channel.c
@@ -40,6 +40,8 @@
40#include <trace/events/gk20a.h> 40#include <trace/events/gk20a.h>
41#include <uapi/linux/nvgpu.h> 41#include <uapi/linux/nvgpu.h>
42 42
43#include "gk20a/sync_gk20a.h"
44
43u32 nvgpu_submit_gpfifo_user_flags_to_common_flags(u32 user_flags) 45u32 nvgpu_submit_gpfifo_user_flags_to_common_flags(u32 user_flags)
44{ 46{
45 u32 flags = 0; 47 u32 flags = 0;
@@ -292,6 +294,10 @@ static int nvgpu_channel_alloc_linux(struct gk20a *g, struct channel_gk20a *ch)
292 ch->os_priv = priv; 294 ch->os_priv = priv;
293 priv->ch = ch; 295 priv->ch = ch;
294 296
297#ifdef CONFIG_SYNC
298 ch->has_os_fence_framework_support = true;
299#endif
300
295 err = nvgpu_mutex_init(&priv->error_notifier.mutex); 301 err = nvgpu_mutex_init(&priv->error_notifier.mutex);
296 if (err) { 302 if (err) {
297 nvgpu_kfree(g, priv); 303 nvgpu_kfree(g, priv);
@@ -309,6 +315,64 @@ static void nvgpu_channel_free_linux(struct gk20a *g, struct channel_gk20a *ch)
309 315
310 nvgpu_mutex_destroy(&priv->error_notifier.mutex); 316 nvgpu_mutex_destroy(&priv->error_notifier.mutex);
311 nvgpu_kfree(g, priv); 317 nvgpu_kfree(g, priv);
318
319 ch->os_priv = NULL;
320
321#ifdef CONFIG_SYNC
322 ch->has_os_fence_framework_support = false;
323#endif
324}
325
326static int nvgpu_channel_init_os_fence_framework(struct channel_gk20a *ch,
327 const char *fmt, ...)
328{
329 struct nvgpu_channel_linux *priv = ch->os_priv;
330 struct nvgpu_os_fence_framework *fence_framework;
331 char name[30];
332 va_list args;
333
334 fence_framework = &priv->fence_framework;
335
336 va_start(args, fmt);
337 vsnprintf(name, sizeof(name), fmt, args);
338 va_end(args);
339
340 fence_framework->timeline = gk20a_sync_timeline_create(name);
341
342 if (!fence_framework->timeline)
343 return -EINVAL;
344
345 return 0;
346}
347static void nvgpu_channel_signal_os_fence_framework(struct channel_gk20a *ch)
348{
349 struct nvgpu_channel_linux *priv = ch->os_priv;
350 struct nvgpu_os_fence_framework *fence_framework;
351
352 fence_framework = &priv->fence_framework;
353
354 gk20a_sync_timeline_signal(fence_framework->timeline);
355}
356
357static void nvgpu_channel_destroy_os_fence_framework(struct channel_gk20a *ch)
358{
359 struct nvgpu_channel_linux *priv = ch->os_priv;
360 struct nvgpu_os_fence_framework *fence_framework;
361
362 fence_framework = &priv->fence_framework;
363
364 gk20a_sync_timeline_destroy(fence_framework->timeline);
365 fence_framework->timeline = NULL;
366}
367
368static bool nvgpu_channel_fence_framework_exists(struct channel_gk20a *ch)
369{
370 struct nvgpu_channel_linux *priv = ch->os_priv;
371 struct nvgpu_os_fence_framework *fence_framework;
372
373 fence_framework = &priv->fence_framework;
374
375 return (fence_framework->timeline != NULL);
312} 376}
313 377
314int nvgpu_init_channel_support_linux(struct nvgpu_os_linux *l) 378int nvgpu_init_channel_support_linux(struct nvgpu_os_linux *l)
@@ -332,6 +396,16 @@ int nvgpu_init_channel_support_linux(struct nvgpu_os_linux *l)
332 nvgpu_channel_work_completion_signal; 396 nvgpu_channel_work_completion_signal;
333 g->os_channel.work_completion_cancel_sync = 397 g->os_channel.work_completion_cancel_sync =
334 nvgpu_channel_work_completion_cancel_sync; 398 nvgpu_channel_work_completion_cancel_sync;
399
400 g->os_channel.os_fence_framework_inst_exists =
401 nvgpu_channel_fence_framework_exists;
402 g->os_channel.init_os_fence_framework =
403 nvgpu_channel_init_os_fence_framework;
404 g->os_channel.signal_os_fence_framework =
405 nvgpu_channel_signal_os_fence_framework;
406 g->os_channel.destroy_os_fence_framework =
407 nvgpu_channel_destroy_os_fence_framework;
408
335 return 0; 409 return 0;
336 410
337err_clean: 411err_clean:
@@ -354,6 +428,11 @@ void nvgpu_remove_channel_support_linux(struct nvgpu_os_linux *l)
354 428
355 nvgpu_channel_free_linux(g, ch); 429 nvgpu_channel_free_linux(g, ch);
356 } 430 }
431
432 g->os_channel.os_fence_framework_inst_exists = NULL;
433 g->os_channel.init_os_fence_framework = NULL;
434 g->os_channel.signal_os_fence_framework = NULL;
435 g->os_channel.destroy_os_fence_framework = NULL;
357} 436}
358 437
359u32 nvgpu_get_gpfifo_entry_size(void) 438u32 nvgpu_get_gpfifo_entry_size(void)
diff --git a/drivers/gpu/nvgpu/common/linux/channel.h b/drivers/gpu/nvgpu/common/linux/channel.h
index d4cb6d55..805de55a 100644
--- a/drivers/gpu/nvgpu/common/linux/channel.h
+++ b/drivers/gpu/nvgpu/common/linux/channel.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,9 @@ struct gk20a_fence;
29struct fifo_profile_gk20a; 29struct fifo_profile_gk20a;
30struct nvgpu_os_linux; 30struct nvgpu_os_linux;
31 31
32struct sync_fence;
33struct sync_timeline;
34
32struct nvgpu_channel_completion_cb { 35struct nvgpu_channel_completion_cb {
33 /* 36 /*
34 * Signal channel owner via a callback, if set, in job cleanup with 37 * Signal channel owner via a callback, if set, in job cleanup with
@@ -52,9 +55,19 @@ struct nvgpu_error_notifier {
52 struct nvgpu_mutex mutex; 55 struct nvgpu_mutex mutex;
53}; 56};
54 57
58/*
59 * This struct contains fence_related data.
60 * e.g. sync_timeline for sync_fences.
61 */
62struct nvgpu_os_fence_framework {
63 struct sync_timeline *timeline;
64};
65
55struct nvgpu_channel_linux { 66struct nvgpu_channel_linux {
56 struct channel_gk20a *ch; 67 struct channel_gk20a *ch;
57 68
69 struct nvgpu_os_fence_framework fence_framework;
70
58 struct nvgpu_channel_completion_cb completion_cb; 71 struct nvgpu_channel_completion_cb completion_cb;
59 struct nvgpu_error_notifier error_notifier; 72 struct nvgpu_error_notifier error_notifier;
60 73
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index b4d7d501..06dfb180 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -774,6 +774,7 @@ static int gk20a_ioctl_channel_submit_gpfifo(
774 struct gk20a_fence *fence_out; 774 struct gk20a_fence *fence_out;
775 struct fifo_profile_gk20a *profile = NULL; 775 struct fifo_profile_gk20a *profile = NULL;
776 u32 submit_flags = 0; 776 u32 submit_flags = 0;
777 int fd = -1;
777 778
778 int ret = 0; 779 int ret = 0;
779 gk20a_dbg_fn(""); 780 gk20a_dbg_fn("");
@@ -794,19 +795,31 @@ static int gk20a_ioctl_channel_submit_gpfifo(
794 nvgpu_get_fence_args(&args->fence, &fence); 795 nvgpu_get_fence_args(&args->fence, &fence);
795 submit_flags = 796 submit_flags =
796 nvgpu_submit_gpfifo_user_flags_to_common_flags(args->flags); 797 nvgpu_submit_gpfifo_user_flags_to_common_flags(args->flags);
798
799 /* Try and allocate an fd here*/
800 if ((args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET)
801 && (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE)) {
802 fd = get_unused_fd_flags(O_RDWR);
803 if (fd < 0)
804 return fd;
805 }
806
797 ret = gk20a_submit_channel_gpfifo(ch, NULL, args, args->num_entries, 807 ret = gk20a_submit_channel_gpfifo(ch, NULL, args, args->num_entries,
798 submit_flags, &fence, 808 submit_flags, &fence,
799 &fence_out, false, profile); 809 &fence_out, false, profile);
800 810
801 if (ret) 811 if (ret) {
812 if (fd != -1)
813 put_unused_fd(fd);
802 goto clean_up; 814 goto clean_up;
815 }
803 816
804 /* Convert fence_out to something we can pass back to user space. */ 817 /* Convert fence_out to something we can pass back to user space. */
805 if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) { 818 if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
806 if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) { 819 if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
807 int fd = gk20a_fence_install_fd(fence_out); 820 ret = gk20a_fence_install_fd(fence_out, fd);
808 if (fd < 0) 821 if (ret)
809 ret = fd; 822 put_unused_fd(fd);
810 else 823 else
811 args->fence.id = fd; 824 args->fence.id = fd;
812 } else { 825 } else {
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
index e4b66460..70707a5c 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
@@ -344,10 +344,19 @@ static int gk20a_ctrl_prepare_compressible_read(
344 struct gk20a_fence *fence_out = NULL; 344 struct gk20a_fence *fence_out = NULL;
345 int submit_flags = nvgpu_submit_gpfifo_user_flags_to_common_flags( 345 int submit_flags = nvgpu_submit_gpfifo_user_flags_to_common_flags(
346 args->submit_flags); 346 args->submit_flags);
347 int fd = -1;
347 348
348 fence.id = args->fence.syncpt_id; 349 fence.id = args->fence.syncpt_id;
349 fence.value = args->fence.syncpt_value; 350 fence.value = args->fence.syncpt_value;
350 351
352 /* Try and allocate an fd here*/
353 if ((submit_flags & NVGPU_SUBMIT_FLAGS_FENCE_GET)
354 && (submit_flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE)) {
355 fd = get_unused_fd_flags(O_RDWR);
356 if (fd < 0)
357 return fd;
358 }
359
351 ret = gk20a_prepare_compressible_read(l, args->handle, 360 ret = gk20a_prepare_compressible_read(l, args->handle,
352 args->request_compbits, args->offset, 361 args->request_compbits, args->offset,
353 args->compbits_hoffset, args->compbits_voffset, 362 args->compbits_hoffset, args->compbits_voffset,
@@ -356,20 +365,24 @@ static int gk20a_ctrl_prepare_compressible_read(
356 submit_flags, &fence, &args->valid_compbits, 365 submit_flags, &fence, &args->valid_compbits,
357 &args->zbc_color, &fence_out); 366 &args->zbc_color, &fence_out);
358 367
359 if (ret) 368 if (ret) {
369 if (fd != -1)
370 put_unused_fd(fd);
360 return ret; 371 return ret;
372 }
361 373
362 /* Convert fence_out to something we can pass back to user space. */ 374 /* Convert fence_out to something we can pass back to user space. */
363 if (submit_flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) { 375 if (submit_flags & NVGPU_SUBMIT_FLAGS_FENCE_GET) {
364 if (submit_flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE) { 376 if (submit_flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE) {
365 if (fence_out) { 377 if (fence_out) {
366 int fd = gk20a_fence_install_fd(fence_out); 378 ret = gk20a_fence_install_fd(fence_out, fd);
367 if (fd < 0) 379 if (ret)
368 ret = fd; 380 put_unused_fd(fd);
369 else 381 else
370 args->fence.fd = fd; 382 args->fence.fd = fd;
371 } else { 383 } else {
372 args->fence.fd = -1; 384 args->fence.fd = -1;
385 put_unused_fd(fd);
373 } 386 }
374 } else { 387 } else {
375 if (fence_out) { 388 if (fence_out) {
diff --git a/drivers/gpu/nvgpu/common/linux/module.c b/drivers/gpu/nvgpu/common/linux/module.c
index b9c9554b..81b3db82 100644
--- a/drivers/gpu/nvgpu/common/linux/module.c
+++ b/drivers/gpu/nvgpu/common/linux/module.c
@@ -40,6 +40,7 @@
40#include <nvgpu/enabled.h> 40#include <nvgpu/enabled.h>
41#include <nvgpu/debug.h> 41#include <nvgpu/debug.h>
42#include <nvgpu/ctxsw_trace.h> 42#include <nvgpu/ctxsw_trace.h>
43#include <nvgpu/vidmem.h>
43 44
44#include "platform_gk20a.h" 45#include "platform_gk20a.h"
45#include "sysfs.h" 46#include "sysfs.h"
@@ -252,13 +253,22 @@ int gk20a_pm_finalize_poweron(struct device *dev)
252 return err; 253 return err;
253 254
254 err = gk20a_finalize_poweron(g); 255 err = gk20a_finalize_poweron(g);
255 set_user_nice(current, nice_value); 256 if (err) {
256 if (err) 257 set_user_nice(current, nice_value);
257 goto done; 258 goto done;
259 }
258 260
259 err = nvgpu_finalize_poweron_linux(l); 261 err = nvgpu_finalize_poweron_linux(l);
260 if (err) 262 if (err) {
263 set_user_nice(current, nice_value);
261 goto done; 264 goto done;
265 }
266
267 nvgpu_init_mm_ce_context(g);
268
269 nvgpu_vidmem_thread_unpause(&g->mm);
270
271 set_user_nice(current, nice_value);
262 272
263 /* Initialise scaling: it will initialize scaling drive only once */ 273 /* Initialise scaling: it will initialize scaling drive only once */
264 if (IS_ENABLED(CONFIG_GK20A_DEVFREQ) && 274 if (IS_ENABLED(CONFIG_GK20A_DEVFREQ) &&
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 2f5514a8..48677529 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -1894,7 +1894,9 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
1894 WARN_ON(!c->sync); 1894 WARN_ON(!c->sync);
1895 1895
1896 if (c->sync) { 1896 if (c->sync) {
1897 c->sync->signal_timeline(c->sync); 1897 if (c->has_os_fence_framework_support &&
1898 g->os_channel.os_fence_framework_inst_exists(c))
1899 g->os_channel.signal_os_fence_framework(c);
1898 1900
1899 if (g->aggressive_sync_destroy_thresh) { 1901 if (g->aggressive_sync_destroy_thresh) {
1900 nvgpu_mutex_acquire(&c->sync_lock); 1902 nvgpu_mutex_acquire(&c->sync_lock);
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 2b8be069..5e8cab0d 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -280,6 +280,8 @@ struct channel_gk20a {
280 struct gk20a_channel_sync *sync; 280 struct gk20a_channel_sync *sync;
281 struct gk20a_channel_sync *user_sync; 281 struct gk20a_channel_sync *user_sync;
282 282
283 bool has_os_fence_framework_support;
284
283#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION 285#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
284 u64 virt_ctx; 286 u64 virt_ctx;
285#endif 287#endif
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index bf467210..c0e035ea 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -183,6 +183,7 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
183 struct gk20a_channel_syncpt *sp = 183 struct gk20a_channel_syncpt *sp =
184 container_of(s, struct gk20a_channel_syncpt, ops); 184 container_of(s, struct gk20a_channel_syncpt, ops);
185 struct channel_gk20a *c = sp->c; 185 struct channel_gk20a *c = sp->c;
186 struct sync_fence *sync_fence = NULL;
186 187
187 err = gk20a_channel_alloc_priv_cmdbuf(c, 188 err = gk20a_channel_alloc_priv_cmdbuf(c,
188 c->g->ops.fifo.get_syncpt_incr_cmd_size(wfi_cmd), 189 c->g->ops.fifo.get_syncpt_incr_cmd_size(wfi_cmd),
@@ -224,10 +225,28 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
224 } 225 }
225 } 226 }
226 227
227 err = gk20a_fence_from_syncpt(fence, sp->nvhost_dev, sp->id, thresh, 228#ifdef CONFIG_SYNC
228 need_sync_fence); 229 if (need_sync_fence) {
229 if (err) 230 sync_fence = nvgpu_nvhost_sync_create_fence(sp->nvhost_dev,
231 sp->id, thresh, "fence");
232
233 if (IS_ERR(sync_fence)) {
234 err = PTR_ERR(sync_fence);
235 goto clean_up_priv_cmd;
236 }
237 }
238#endif
239
240 err = gk20a_fence_from_syncpt(fence, sp->nvhost_dev,
241 sp->id, thresh, sync_fence);
242
243 if (err) {
244#ifdef CONFIG_SYNC
245 if (sync_fence)
246 sync_fence_put(sync_fence);
247#endif
230 goto clean_up_priv_cmd; 248 goto clean_up_priv_cmd;
249 }
231 250
232 return 0; 251 return 0;
233 252
@@ -290,12 +309,6 @@ static void gk20a_channel_syncpt_set_safe_state(struct gk20a_channel_sync *s)
290 nvgpu_nvhost_syncpt_set_safe_state(sp->nvhost_dev, sp->id); 309 nvgpu_nvhost_syncpt_set_safe_state(sp->nvhost_dev, sp->id);
291} 310}
292 311
293static void gk20a_channel_syncpt_signal_timeline(
294 struct gk20a_channel_sync *s)
295{
296 /* Nothing to do. */
297}
298
299static int gk20a_channel_syncpt_id(struct gk20a_channel_sync *s) 312static int gk20a_channel_syncpt_id(struct gk20a_channel_sync *s)
300{ 313{
301 struct gk20a_channel_syncpt *sp = 314 struct gk20a_channel_syncpt *sp =
@@ -368,7 +381,6 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c, bool user_managed)
368 sp->ops.incr_user = gk20a_channel_syncpt_incr_user; 381 sp->ops.incr_user = gk20a_channel_syncpt_incr_user;
369 sp->ops.set_min_eq_max = gk20a_channel_syncpt_set_min_eq_max; 382 sp->ops.set_min_eq_max = gk20a_channel_syncpt_set_min_eq_max;
370 sp->ops.set_safe_state = gk20a_channel_syncpt_set_safe_state; 383 sp->ops.set_safe_state = gk20a_channel_syncpt_set_safe_state;
371 sp->ops.signal_timeline = gk20a_channel_syncpt_signal_timeline;
372 sp->ops.syncpt_id = gk20a_channel_syncpt_id; 384 sp->ops.syncpt_id = gk20a_channel_syncpt_id;
373 sp->ops.syncpt_address = gk20a_channel_syncpt_address; 385 sp->ops.syncpt_address = gk20a_channel_syncpt_address;
374 sp->ops.destroy = gk20a_channel_syncpt_destroy; 386 sp->ops.destroy = gk20a_channel_syncpt_destroy;
@@ -383,9 +395,6 @@ struct gk20a_channel_semaphore {
383 395
384 /* A semaphore pool owned by this channel. */ 396 /* A semaphore pool owned by this channel. */
385 struct nvgpu_semaphore_pool *pool; 397 struct nvgpu_semaphore_pool *pool;
386
387 /* A sync timeline that advances when gpu completes work. */
388 struct sync_timeline *timeline;
389}; 398};
390 399
391static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c, 400static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
@@ -560,6 +569,7 @@ static int __gk20a_channel_semaphore_incr(
560 struct channel_gk20a *c = sp->c; 569 struct channel_gk20a *c = sp->c;
561 struct nvgpu_semaphore *semaphore; 570 struct nvgpu_semaphore *semaphore;
562 int err = 0; 571 int err = 0;
572 struct sync_fence *sync_fence = NULL;
563 573
564 semaphore = nvgpu_semaphore_alloc(c); 574 semaphore = nvgpu_semaphore_alloc(c);
565 if (!semaphore) { 575 if (!semaphore) {
@@ -579,12 +589,31 @@ static int __gk20a_channel_semaphore_incr(
579 /* Release the completion semaphore. */ 589 /* Release the completion semaphore. */
580 add_sema_cmd(c->g, c, semaphore, incr_cmd, 0, false, wfi_cmd); 590 add_sema_cmd(c->g, c, semaphore, incr_cmd, 0, false, wfi_cmd);
581 591
582 err = gk20a_fence_from_semaphore(c->g, fence, 592#ifdef CONFIG_SYNC
583 sp->timeline, semaphore, 593 if (need_sync_fence) {
584 &c->semaphore_wq, 594 sync_fence = gk20a_sync_fence_create(c,
585 need_sync_fence); 595 semaphore, "f-gk20a-0x%04x",
586 if (err) 596 nvgpu_semaphore_gpu_ro_va(semaphore));
597
598 if (!sync_fence) {
599 err = -ENOMEM;
600 goto clean_up_sema;
601 }
602 }
603#endif
604
605 err = gk20a_fence_from_semaphore(fence,
606 semaphore,
607 &c->semaphore_wq,
608 sync_fence);
609
610 if (err) {
611#ifdef CONFIG_SYNC
612 if (sync_fence)
613 sync_fence_put(sync_fence);
614#endif
587 goto clean_up_sema; 615 goto clean_up_sema;
616 }
588 617
589 return 0; 618 return 0;
590 619
@@ -665,14 +694,6 @@ static void gk20a_channel_semaphore_set_safe_state(struct gk20a_channel_sync *s)
665 /* Nothing to do. */ 694 /* Nothing to do. */
666} 695}
667 696
668static void gk20a_channel_semaphore_signal_timeline(
669 struct gk20a_channel_sync *s)
670{
671 struct gk20a_channel_semaphore *sp =
672 container_of(s, struct gk20a_channel_semaphore, ops);
673 gk20a_sync_timeline_signal(sp->timeline);
674}
675
676static int gk20a_channel_semaphore_syncpt_id(struct gk20a_channel_sync *s) 697static int gk20a_channel_semaphore_syncpt_id(struct gk20a_channel_sync *s)
677{ 698{
678 return -EINVAL; 699 return -EINVAL;
@@ -687,8 +708,13 @@ static void gk20a_channel_semaphore_destroy(struct gk20a_channel_sync *s)
687{ 708{
688 struct gk20a_channel_semaphore *sema = 709 struct gk20a_channel_semaphore *sema =
689 container_of(s, struct gk20a_channel_semaphore, ops); 710 container_of(s, struct gk20a_channel_semaphore, ops);
690 if (sema->timeline) 711
691 gk20a_sync_timeline_destroy(sema->timeline); 712 struct channel_gk20a *c = sema->c;
713 struct gk20a *g = c->g;
714
715 if (c->has_os_fence_framework_support &&
716 g->os_channel.os_fence_framework_inst_exists(c))
717 g->os_channel.destroy_os_fence_framework(c);
692 718
693 /* The sema pool is cleaned up by the VM destroy. */ 719 /* The sema pool is cleaned up by the VM destroy. */
694 sema->pool = NULL; 720 sema->pool = NULL;
@@ -700,10 +726,10 @@ static struct gk20a_channel_sync *
700gk20a_channel_semaphore_create(struct channel_gk20a *c, bool user_managed) 726gk20a_channel_semaphore_create(struct channel_gk20a *c, bool user_managed)
701{ 727{
702 struct gk20a_channel_semaphore *sema; 728 struct gk20a_channel_semaphore *sema;
729 struct gk20a *g = c->g;
703 char pool_name[20]; 730 char pool_name[20];
704#ifdef CONFIG_SYNC
705 int asid = -1; 731 int asid = -1;
706#endif 732 int err;
707 733
708 if (WARN_ON(!c->vm)) 734 if (WARN_ON(!c->vm))
709 return NULL; 735 return NULL;
@@ -716,17 +742,20 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c, bool user_managed)
716 sprintf(pool_name, "semaphore_pool-%d", c->chid); 742 sprintf(pool_name, "semaphore_pool-%d", c->chid);
717 sema->pool = c->vm->sema_pool; 743 sema->pool = c->vm->sema_pool;
718 744
719#ifdef CONFIG_SYNC
720 if (c->vm->as_share) 745 if (c->vm->as_share)
721 asid = c->vm->as_share->id; 746 asid = c->vm->as_share->id;
722 747
723 sema->timeline = gk20a_sync_timeline_create( 748 if (c->has_os_fence_framework_support) {
749 /*Init the sync_timeline for this channel */
750 err = g->os_channel.init_os_fence_framework(c,
724 "gk20a_ch%d_as%d", c->chid, asid); 751 "gk20a_ch%d_as%d", c->chid, asid);
725 if (!sema->timeline) { 752
726 gk20a_channel_semaphore_destroy(&sema->ops); 753 if (err) {
727 return NULL; 754 nvgpu_kfree(g, sema);
755 return NULL;
756 }
728 } 757 }
729#endif 758
730 nvgpu_atomic_set(&sema->ops.refcount, 0); 759 nvgpu_atomic_set(&sema->ops.refcount, 0);
731 sema->ops.wait_syncpt = gk20a_channel_semaphore_wait_syncpt; 760 sema->ops.wait_syncpt = gk20a_channel_semaphore_wait_syncpt;
732 sema->ops.wait_fd = gk20a_channel_semaphore_wait_fd; 761 sema->ops.wait_fd = gk20a_channel_semaphore_wait_fd;
@@ -735,7 +764,6 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c, bool user_managed)
735 sema->ops.incr_user = gk20a_channel_semaphore_incr_user; 764 sema->ops.incr_user = gk20a_channel_semaphore_incr_user;
736 sema->ops.set_min_eq_max = gk20a_channel_semaphore_set_min_eq_max; 765 sema->ops.set_min_eq_max = gk20a_channel_semaphore_set_min_eq_max;
737 sema->ops.set_safe_state = gk20a_channel_semaphore_set_safe_state; 766 sema->ops.set_safe_state = gk20a_channel_semaphore_set_safe_state;
738 sema->ops.signal_timeline = gk20a_channel_semaphore_signal_timeline;
739 sema->ops.syncpt_id = gk20a_channel_semaphore_syncpt_id; 767 sema->ops.syncpt_id = gk20a_channel_semaphore_syncpt_id;
740 sema->ops.syncpt_address = gk20a_channel_semaphore_syncpt_address; 768 sema->ops.syncpt_address = gk20a_channel_semaphore_syncpt_address;
741 sema->ops.destroy = gk20a_channel_semaphore_destroy; 769 sema->ops.destroy = gk20a_channel_semaphore_destroy;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
index adbecbe1..d63b358f 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * GK20A Channel Synchronization Abstraction 4 * GK20A Channel Synchronization Abstraction
5 * 5 *
6 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 6 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"), 9 * copy of this software and associated documentation files (the "Software"),
@@ -93,11 +93,6 @@ struct gk20a_channel_sync {
93 */ 93 */
94 void (*set_safe_state)(struct gk20a_channel_sync *s); 94 void (*set_safe_state)(struct gk20a_channel_sync *s);
95 95
96 /* Signals the sync timeline (if owned by the gk20a_channel_sync layer).
97 * This should be called when we notice that a gk20a_fence is
98 * expired. */
99 void (*signal_timeline)(struct gk20a_channel_sync *s);
100
101 /* Returns the sync point id or negative number if no syncpt*/ 96 /* Returns the sync point id or negative number if no syncpt*/
102 int (*syncpt_id)(struct gk20a_channel_sync *s); 97 int (*syncpt_id)(struct gk20a_channel_sync *s);
103 98
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
index f74afd6e..f0ad773f 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
@@ -54,9 +54,10 @@ static void gk20a_fence_free(struct nvgpu_ref *ref)
54 struct gk20a *g = f->g; 54 struct gk20a *g = f->g;
55 55
56#ifdef CONFIG_SYNC 56#ifdef CONFIG_SYNC
57 if (f->sync_fence) 57 if (f->os_fence)
58 sync_fence_put(f->sync_fence); 58 sync_fence_put(f->os_fence);
59#endif 59#endif
60
60 if (f->semaphore) 61 if (f->semaphore)
61 nvgpu_semaphore_put(f->semaphore); 62 nvgpu_semaphore_put(f->semaphore);
62 63
@@ -80,7 +81,7 @@ struct gk20a_fence *gk20a_fence_get(struct gk20a_fence *f)
80 return f; 81 return f;
81} 82}
82 83
83static inline bool gk20a_fence_is_valid(struct gk20a_fence *f) 84inline bool gk20a_fence_is_valid(struct gk20a_fence *f)
84{ 85{
85 bool valid = f->valid; 86 bool valid = f->valid;
86 87
@@ -88,6 +89,21 @@ static inline bool gk20a_fence_is_valid(struct gk20a_fence *f)
88 return valid; 89 return valid;
89} 90}
90 91
92int gk20a_fence_install_fd(struct gk20a_fence *f, int fd)
93{
94#ifdef CONFIG_SYNC
95 if (!f || !gk20a_fence_is_valid(f) || !f->os_fence)
96 return -EINVAL;
97
98 sync_fence_get(f->os_fence);
99 sync_fence_install(f->os_fence, fd);
100
101 return 0;
102#else
103 return -ENODEV;
104#endif
105}
106
91int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f, 107int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f,
92 unsigned long timeout) 108 unsigned long timeout)
93{ 109{
@@ -107,26 +123,6 @@ bool gk20a_fence_is_expired(struct gk20a_fence *f)
107 return true; 123 return true;
108} 124}
109 125
110int gk20a_fence_install_fd(struct gk20a_fence *f)
111{
112#ifdef CONFIG_SYNC
113 int fd;
114
115 if (!f || !gk20a_fence_is_valid(f) || !f->sync_fence)
116 return -EINVAL;
117
118 fd = get_unused_fd_flags(O_RDWR);
119 if (fd < 0)
120 return fd;
121
122 sync_fence_get(f->sync_fence);
123 sync_fence_install(f->sync_fence, fd);
124 return fd;
125#else
126 return -ENODEV;
127#endif
128}
129
130int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count) 126int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count)
131{ 127{
132 int err; 128 int err;
@@ -195,13 +191,14 @@ struct gk20a_fence *gk20a_alloc_fence(struct channel_gk20a *c)
195 191
196void gk20a_init_fence(struct gk20a_fence *f, 192void gk20a_init_fence(struct gk20a_fence *f,
197 const struct gk20a_fence_ops *ops, 193 const struct gk20a_fence_ops *ops,
198 struct sync_fence *sync_fence) 194 struct sync_fence *os_fence)
199{ 195{
200 if (!f) 196 if (!f)
201 return; 197 return;
202 f->ops = ops; 198 f->ops = ops;
203 f->sync_fence = sync_fence;
204 f->syncpt_id = -1; 199 f->syncpt_id = -1;
200 f->semaphore = NULL;
201 f->os_fence = os_fence;
205} 202}
206 203
207/* Fences that are backed by GPU semaphores: */ 204/* Fences that are backed by GPU semaphores: */
@@ -227,36 +224,19 @@ static const struct gk20a_fence_ops nvgpu_semaphore_fence_ops = {
227 .is_expired = &nvgpu_semaphore_fence_is_expired, 224 .is_expired = &nvgpu_semaphore_fence_is_expired,
228}; 225};
229 226
230/* This function takes ownership of the semaphore */ 227/* This function takes ownership of the semaphore as well as the os_fence */
231int gk20a_fence_from_semaphore( 228int gk20a_fence_from_semaphore(
232 struct gk20a *g,
233 struct gk20a_fence *fence_out, 229 struct gk20a_fence *fence_out,
234 struct sync_timeline *timeline,
235 struct nvgpu_semaphore *semaphore, 230 struct nvgpu_semaphore *semaphore,
236 struct nvgpu_cond *semaphore_wq, 231 struct nvgpu_cond *semaphore_wq,
237 bool need_sync_fence) 232 struct sync_fence *os_fence)
238{ 233{
239 struct gk20a_fence *f = fence_out; 234 struct gk20a_fence *f = fence_out;
240 struct sync_fence *sync_fence = NULL;
241
242#ifdef CONFIG_SYNC
243 if (need_sync_fence) {
244 sync_fence = gk20a_sync_fence_create(g, timeline, semaphore,
245 "f-gk20a-0x%04x",
246 nvgpu_semaphore_gpu_ro_va(semaphore));
247 if (!sync_fence)
248 return -ENOMEM;
249 }
250#endif
251 235
252 gk20a_init_fence(f, &nvgpu_semaphore_fence_ops, sync_fence); 236 gk20a_init_fence(f, &nvgpu_semaphore_fence_ops, os_fence);
253 if (!f) { 237 if (!f)
254#ifdef CONFIG_SYNC
255 if (sync_fence)
256 sync_fence_put(sync_fence);
257#endif
258 return -EINVAL; 238 return -EINVAL;
259 } 239
260 240
261 f->semaphore = semaphore; 241 f->semaphore = semaphore;
262 f->semaphore_wq = semaphore_wq; 242 f->semaphore_wq = semaphore_wq;
@@ -306,32 +286,18 @@ static const struct gk20a_fence_ops gk20a_syncpt_fence_ops = {
306 .is_expired = &gk20a_syncpt_fence_is_expired, 286 .is_expired = &gk20a_syncpt_fence_is_expired,
307}; 287};
308 288
289/* This function takes the ownership of the os_fence */
309int gk20a_fence_from_syncpt( 290int gk20a_fence_from_syncpt(
310 struct gk20a_fence *fence_out, 291 struct gk20a_fence *fence_out,
311 struct nvgpu_nvhost_dev *nvhost_dev, 292 struct nvgpu_nvhost_dev *nvhost_dev,
312 u32 id, u32 value, 293 u32 id, u32 value, struct sync_fence *os_fence)
313 bool need_sync_fence)
314{ 294{
315 struct gk20a_fence *f = fence_out; 295 struct gk20a_fence *f = fence_out;
316 struct sync_fence *sync_fence = NULL;
317
318#ifdef CONFIG_SYNC
319 if (need_sync_fence) {
320 sync_fence = nvgpu_nvhost_sync_create_fence(nvhost_dev,
321 id, value, "fence");
322 if (IS_ERR(sync_fence))
323 return PTR_ERR(sync_fence);
324 }
325#endif
326 296
327 gk20a_init_fence(f, &gk20a_syncpt_fence_ops, sync_fence); 297 gk20a_init_fence(f, &gk20a_syncpt_fence_ops, os_fence);
328 if (!f) { 298 if (!f)
329#ifdef CONFIG_SYNC
330 if (sync_fence)
331 sync_fence_put(sync_fence);
332#endif
333 return -EINVAL; 299 return -EINVAL;
334 } 300
335 f->nvhost_dev = nvhost_dev; 301 f->nvhost_dev = nvhost_dev;
336 f->syncpt_id = id; 302 f->syncpt_id = id;
337 f->syncpt_value = value; 303 f->syncpt_value = value;
@@ -346,8 +312,7 @@ int gk20a_fence_from_syncpt(
346int gk20a_fence_from_syncpt( 312int gk20a_fence_from_syncpt(
347 struct gk20a_fence *fence_out, 313 struct gk20a_fence *fence_out,
348 struct nvgpu_nvhost_dev *nvhost_dev, 314 struct nvgpu_nvhost_dev *nvhost_dev,
349 u32 id, u32 value, 315 u32 id, u32 value, struct sync_fence *os_fence)
350 bool need_sync_fence)
351{ 316{
352 return -EINVAL; 317 return -EINVAL;
353} 318}
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
index 277603d1..6a28e657 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * GK20A Fences 4 * GK20A Fences
5 * 5 *
6 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. 6 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"), 9 * copy of this software and associated documentation files (the "Software"),
@@ -44,9 +44,10 @@ struct gk20a_fence {
44 /* Valid for all fence types: */ 44 /* Valid for all fence types: */
45 bool valid; 45 bool valid;
46 struct nvgpu_ref ref; 46 struct nvgpu_ref ref;
47 struct sync_fence *sync_fence;
48 const struct gk20a_fence_ops *ops; 47 const struct gk20a_fence_ops *ops;
49 48
49 struct sync_fence *os_fence;
50
50 /* Valid for fences created from semaphores: */ 51 /* Valid for fences created from semaphores: */
51 struct nvgpu_semaphore *semaphore; 52 struct nvgpu_semaphore *semaphore;
52 struct nvgpu_cond *semaphore_wq; 53 struct nvgpu_cond *semaphore_wq;
@@ -62,18 +63,16 @@ struct gk20a_fence {
62 63
63/* Fences can be created from semaphores or syncpoint (id, value) pairs */ 64/* Fences can be created from semaphores or syncpoint (id, value) pairs */
64int gk20a_fence_from_semaphore( 65int gk20a_fence_from_semaphore(
65 struct gk20a *g,
66 struct gk20a_fence *fence_out, 66 struct gk20a_fence *fence_out,
67 struct sync_timeline *timeline,
68 struct nvgpu_semaphore *semaphore, 67 struct nvgpu_semaphore *semaphore,
69 struct nvgpu_cond *semaphore_wq, 68 struct nvgpu_cond *semaphore_wq,
70 bool need_sync_fence); 69 struct sync_fence *os_fence);
71 70
72int gk20a_fence_from_syncpt( 71int gk20a_fence_from_syncpt(
73 struct gk20a_fence *fence_out, 72 struct gk20a_fence *fence_out,
74 struct nvgpu_nvhost_dev *nvhost_dev, 73 struct nvgpu_nvhost_dev *nvhost_dev,
75 u32 id, u32 value, 74 u32 id, u32 value,
76 bool need_sync_fence); 75 struct sync_fence *os_fence);
77 76
78int gk20a_alloc_fence_pool( 77int gk20a_alloc_fence_pool(
79 struct channel_gk20a *c, 78 struct channel_gk20a *c,
@@ -87,7 +86,7 @@ struct gk20a_fence *gk20a_alloc_fence(
87 86
88void gk20a_init_fence(struct gk20a_fence *f, 87void gk20a_init_fence(struct gk20a_fence *f,
89 const struct gk20a_fence_ops *ops, 88 const struct gk20a_fence_ops *ops,
90 struct sync_fence *sync_fence); 89 struct sync_fence *os_fence);
91 90
92/* Fence operations */ 91/* Fence operations */
93void gk20a_fence_put(struct gk20a_fence *f); 92void gk20a_fence_put(struct gk20a_fence *f);
@@ -95,6 +94,7 @@ struct gk20a_fence *gk20a_fence_get(struct gk20a_fence *f);
95int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f, 94int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f,
96 unsigned long timeout); 95 unsigned long timeout);
97bool gk20a_fence_is_expired(struct gk20a_fence *f); 96bool gk20a_fence_is_expired(struct gk20a_fence *f);
98int gk20a_fence_install_fd(struct gk20a_fence *f); 97bool gk20a_fence_is_valid(struct gk20a_fence *f);
98int gk20a_fence_install_fd(struct gk20a_fence *f, int fd);
99 99
100#endif 100#endif
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index 64ae4401..8c81b5b6 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -335,10 +335,6 @@ int gk20a_finalize_poweron(struct gk20a *g)
335 if (g->ops.fifo.channel_resume) 335 if (g->ops.fifo.channel_resume)
336 g->ops.fifo.channel_resume(g); 336 g->ops.fifo.channel_resume(g);
337 337
338 nvgpu_init_mm_ce_context(g);
339
340 nvgpu_vidmem_thread_unpause(&g->mm);
341
342done: 338done:
343 if (err) 339 if (err)
344 g->power_on = false; 340 g->power_on = false;
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index bb0b572f..57854e11 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -1408,6 +1408,11 @@ struct gk20a {
1408 void (*close)(struct channel_gk20a *ch); 1408 void (*close)(struct channel_gk20a *ch);
1409 void (*work_completion_signal)(struct channel_gk20a *ch); 1409 void (*work_completion_signal)(struct channel_gk20a *ch);
1410 void (*work_completion_cancel_sync)(struct channel_gk20a *ch); 1410 void (*work_completion_cancel_sync)(struct channel_gk20a *ch);
1411 bool (*os_fence_framework_inst_exists)(struct channel_gk20a *ch);
1412 int (*init_os_fence_framework)(
1413 struct channel_gk20a *ch, const char *fmt, ...);
1414 void (*signal_os_fence_framework)(struct channel_gk20a *ch);
1415 void (*destroy_os_fence_framework)(struct channel_gk20a *ch);
1411 } os_channel; 1416 } os_channel;
1412 1417
1413 struct gk20a_scale_profile *scale_profile; 1418 struct gk20a_scale_profile *scale_profile;
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
index a8600bce..56c90da7 100644
--- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
@@ -32,6 +32,7 @@
32#include <nvgpu/semaphore.h> 32#include <nvgpu/semaphore.h>
33#include <nvgpu/bug.h> 33#include <nvgpu/bug.h>
34#include <nvgpu/kref.h> 34#include <nvgpu/kref.h>
35#include "../common/linux/channel.h"
35 36
36#include "../drivers/staging/android/sync.h" 37#include "../drivers/staging/android/sync.h"
37 38
@@ -373,15 +374,9 @@ void gk20a_sync_timeline_destroy(struct sync_timeline *timeline)
373} 374}
374 375
375struct sync_timeline *gk20a_sync_timeline_create( 376struct sync_timeline *gk20a_sync_timeline_create(
376 const char *fmt, ...) 377 const char *name)
377{ 378{
378 struct gk20a_sync_timeline *obj; 379 struct gk20a_sync_timeline *obj;
379 char name[30];
380 va_list args;
381
382 va_start(args, fmt);
383 vsnprintf(name, sizeof(name), fmt, args);
384 va_end(args);
385 380
386 obj = (struct gk20a_sync_timeline *) 381 obj = (struct gk20a_sync_timeline *)
387 sync_timeline_create(&gk20a_sync_timeline_ops, 382 sync_timeline_create(&gk20a_sync_timeline_ops,
@@ -395,8 +390,7 @@ struct sync_timeline *gk20a_sync_timeline_create(
395} 390}
396 391
397struct sync_fence *gk20a_sync_fence_create( 392struct sync_fence *gk20a_sync_fence_create(
398 struct gk20a *g, 393 struct channel_gk20a *c,
399 struct sync_timeline *obj,
400 struct nvgpu_semaphore *sema, 394 struct nvgpu_semaphore *sema,
401 const char *fmt, ...) 395 const char *fmt, ...)
402{ 396{
@@ -404,7 +398,15 @@ struct sync_fence *gk20a_sync_fence_create(
404 va_list args; 398 va_list args;
405 struct sync_pt *pt; 399 struct sync_pt *pt;
406 struct sync_fence *fence; 400 struct sync_fence *fence;
407 struct gk20a_sync_timeline *timeline = to_gk20a_timeline(obj); 401 struct gk20a *g = c->g;
402
403 struct nvgpu_channel_linux *os_channel_priv = c->os_priv;
404 struct nvgpu_os_fence_framework *fence_framework = NULL;
405 struct gk20a_sync_timeline *timeline = NULL;
406
407 fence_framework = &os_channel_priv->fence_framework;
408
409 timeline = to_gk20a_timeline(fence_framework->timeline);
408 410
409 pt = gk20a_sync_pt_create_inst(g, timeline, sema); 411 pt = gk20a_sync_pt_create_inst(g, timeline, sema);
410 if (pt == NULL) 412 if (pt == NULL)
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.h b/drivers/gpu/nvgpu/gk20a/sync_gk20a.h
index 8a6439ab..ffdfaec3 100644
--- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.h
@@ -34,12 +34,11 @@ struct nvgpu_semaphore;
34struct fence; 34struct fence;
35 35
36#ifdef CONFIG_SYNC 36#ifdef CONFIG_SYNC
37struct sync_timeline *gk20a_sync_timeline_create(const char *fmt, ...); 37struct sync_timeline *gk20a_sync_timeline_create(const char *name);
38void gk20a_sync_timeline_destroy(struct sync_timeline *); 38void gk20a_sync_timeline_destroy(struct sync_timeline *);
39void gk20a_sync_timeline_signal(struct sync_timeline *); 39void gk20a_sync_timeline_signal(struct sync_timeline *);
40struct sync_fence *gk20a_sync_fence_create( 40struct sync_fence *gk20a_sync_fence_create(
41 struct gk20a *g, 41 struct channel_gk20a *c,
42 struct sync_timeline *,
43 struct nvgpu_semaphore *, 42 struct nvgpu_semaphore *,
44 const char *fmt, ...); 43 const char *fmt, ...);
45struct sync_fence *gk20a_sync_fence_fdget(int fd); 44struct sync_fence *gk20a_sync_fence_fdget(int fd);
@@ -51,6 +50,10 @@ static inline struct sync_fence *gk20a_sync_fence_fdget(int fd)
51{ 50{
52 return NULL; 51 return NULL;
53} 52}
53static inline struct sync_timeline *gk20a_sync_timeline_create(
54 const char *name) {
55 return NULL;
56}
54#endif 57#endif
55 58
56#endif 59#endif