summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/linux/channel.c
diff options
context:
space:
mode:
authorKonsta Holtta <kholtta@nvidia.com>2018-05-17 07:01:36 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-05-18 17:05:30 -0400
commitde67fb18fb639b7a605c77eeb2e1c639a8a3d67e (patch)
treef435a0f82f0e08d911c04f43ff91141f0b7471aa /drivers/gpu/nvgpu/common/linux/channel.c
parent6266a1210d9fa36916e7469a0107b73a076b43a1 (diff)
gpu: nvgpu: drop force_need_sync_fence in submit path
For CDE work a sync fence is always requested, but the kernel does not need it, and submit flags from userspace will be passed to the submit function in the CDE path, so a sync fence will get created if necessary. To reduce some complexity, remove the explicit boolean in favor of just NVGPU_SUBMIT_FLAGS_SYNC_FENCE. Jira NVGPU-705 Change-Id: I8aac85288513ed7cc640acd021d892cee86f41d8 Signed-off-by: Konsta Holtta <kholtta@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1721785 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/channel.c')
-rw-r--r--drivers/gpu/nvgpu/common/linux/channel.c12
1 file changed, 1 insertion, 11 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/channel.c b/drivers/gpu/nvgpu/common/linux/channel.c
index f189d3ed..88bc2009 100644
--- a/drivers/gpu/nvgpu/common/linux/channel.c
+++ b/drivers/gpu/nvgpu/common/linux/channel.c
@@ -527,7 +527,6 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
527 struct priv_cmd_entry **wait_cmd, 527 struct priv_cmd_entry **wait_cmd,
528 struct priv_cmd_entry **incr_cmd, 528 struct priv_cmd_entry **incr_cmd,
529 struct gk20a_fence **post_fence, 529 struct gk20a_fence **post_fence,
530 bool force_need_sync_fence,
531 bool register_irq, 530 bool register_irq,
532 u32 flags) 531 u32 flags)
533{ 532{
@@ -539,13 +538,6 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
539 bool need_wfi = !(flags & NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI); 538 bool need_wfi = !(flags & NVGPU_SUBMIT_FLAGS_SUPPRESS_WFI);
540 bool pre_alloc_enabled = channel_gk20a_is_prealloc_enabled(c); 539 bool pre_alloc_enabled = channel_gk20a_is_prealloc_enabled(c);
541 540
542 /*
543 * If user wants to always allocate sync_fence_fds then respect that;
544 * otherwise, allocate sync_fence_fd based on user flags.
545 */
546 if (force_need_sync_fence)
547 need_sync_fence = true;
548
549 if (g->aggressive_sync_destroy_thresh) { 541 if (g->aggressive_sync_destroy_thresh) {
550 nvgpu_mutex_acquire(&c->sync_lock); 542 nvgpu_mutex_acquire(&c->sync_lock);
551 if (!c->sync) { 543 if (!c->sync) {
@@ -772,7 +764,6 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
772 u32 flags, 764 u32 flags,
773 struct nvgpu_channel_fence *fence, 765 struct nvgpu_channel_fence *fence,
774 struct gk20a_fence **fence_out, 766 struct gk20a_fence **fence_out,
775 bool force_need_sync_fence,
776 struct fifo_profile_gk20a *profile) 767 struct fifo_profile_gk20a *profile)
777{ 768{
778 struct gk20a *g = c->g; 769 struct gk20a *g = c->g;
@@ -861,7 +852,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
861 if (c->deterministic && !channel_gk20a_is_prealloc_enabled(c)) 852 if (c->deterministic && !channel_gk20a_is_prealloc_enabled(c))
862 return -EINVAL; 853 return -EINVAL;
863 854
864 need_sync_framework = force_need_sync_fence || 855 need_sync_framework =
865 gk20a_channel_sync_needs_sync_framework(g) || 856 gk20a_channel_sync_needs_sync_framework(g) ||
866 (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE && 857 (flags & NVGPU_SUBMIT_FLAGS_SYNC_FENCE &&
867 flags & NVGPU_SUBMIT_FLAGS_FENCE_GET); 858 flags & NVGPU_SUBMIT_FLAGS_FENCE_GET);
@@ -968,7 +959,6 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
968 err = gk20a_submit_prepare_syncs(c, fence, job, 959 err = gk20a_submit_prepare_syncs(c, fence, job,
969 &wait_cmd, &incr_cmd, 960 &wait_cmd, &incr_cmd,
970 &post_fence, 961 &post_fence,
971 force_need_sync_fence,
972 need_deferred_cleanup, 962 need_deferred_cleanup,
973 flags); 963 flags);
974 if (err) 964 if (err)