author    Deepak Nibade <dnibade@nvidia.com>    2015-10-07 06:50:07 -0400
committer Sachin Nikam <snikam@nvidia.com>      2015-12-08 04:18:04 -0500
commit    52753b51f1dbf51221d7856a9288aad1ab2d351a (patch)
tree      70a9dbdba1087797202ec3e1a584408d82947bd9 /drivers/gpu/nvgpu/gk20a/channel_gk20a.h
parent    937de14907bbc238d180defc1afe036faa24f1bc (diff)
gpu: nvgpu: create sync_fence only if needed
Currently, we create a sync_fence (via nvhost_sync_create_fence()) for every submit, but not all submits request a sync_fence. Moreover, the nvhost_sync_create_fence() call accounts for about one third of the total submit path.

To optimize, allocate a sync_fence only when the user explicitly asks for one by setting both NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET and NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE.

Also, in the CDE path from gk20a_prepare_compressible_read(), we reuse the existing fence stored in "state", which can result in no sync_fence_fd being returned even when the user asked for one. Hence, force allocation of a sync_fence when the job submission comes from the CDE path.

Bug 200141116

Change-Id: Ia921701bf0e2432d6b8a5e8b7d91160e7f52db1e
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/812845
(cherry picked from commit 5fd47015eeed00352cc8473eff969a66c94fee98)
Reviewed-on: http://git-master/r/837662
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
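The decision this change introduces can be sketched roughly as follows. This is an illustrative helper only, not code added by the patch; only the two submit flags and the force_need_sync_fence parameter come from the change itself.

/*
 * Illustrative sketch, not the actual nvgpu implementation: create a
 * sync_fence only when user space asks for a sync fence fd back, or when
 * the caller (e.g. the CDE path) forces it because it reuses a previously
 * stored fence.
 */
static bool need_sync_fence(u32 flags, bool force_need_sync_fence)
{
	/* User explicitly requested a sync_fence fd for this submit */
	if ((flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) &&
	    (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE))
		return true;

	/* CDE submissions must always get a freshly allocated sync_fence */
	return force_need_sync_fence;
}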
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.h  3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 55528dd9..d3428788 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -244,7 +244,8 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 				u32 num_entries,
 				u32 flags,
 				struct nvgpu_fence *fence,
-				struct gk20a_fence **fence_out);
+				struct gk20a_fence **fence_out,
+				bool force_need_sync_fence);
 
 int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 		struct nvgpu_alloc_gpfifo_args *args);