summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorddutta <ddutta@nvidia.com>2018-09-18 03:14:44 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-20 06:27:55 -0400
commitfeefb7046a88311d88a37ad2cc934ec7b9a9c28f (patch)
tree4fc16843d6a8080ed3544971be5f19b15e3a42d5 /drivers
parent7e591dced99f328f4960702dbb6235fe7dc7f6b5 (diff)
gpu: nvgpu: minor fixes in channel_sync.c
This patch comes as a follow-up to commit 2517d59be282426eec7a97745b76d745ff36c388, containing minor fixes, i.e., changing the type of 'pos' to u32 instead of int and renaming syncpt_get_id to channel_sync_syncpt_get_id. Jira NVGPU-1086 Change-Id: I8bd9271c20d88ff5f68ccfc48a0b533844bbcaaa Signed-off-by: Debarshi Dutta <ddutta@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1829832 Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com> Reviewed-by: Konsta Holtta <kholtta@nvidia.com> Reviewed-by: Alex Waterman <alexw@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/common/sync/channel_sync.c16
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/channel_sync.h6
2 files changed, 11 insertions, 11 deletions
diff --git a/drivers/gpu/nvgpu/common/sync/channel_sync.c b/drivers/gpu/nvgpu/common/sync/channel_sync.c
index b4caab38..fa08a7d3 100644
--- a/drivers/gpu/nvgpu/common/sync/channel_sync.c
+++ b/drivers/gpu/nvgpu/common/sync/channel_sync.c
@@ -49,7 +49,7 @@ struct nvgpu_channel_sync_syncpt {
49 49
50int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c, 50int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
51 u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd, 51 u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd,
52 u32 wait_cmd_size, int pos, bool preallocated) 52 u32 wait_cmd_size, u32 pos, bool preallocated)
53{ 53{
54 int err = 0; 54 int err = 0;
55 bool is_expired = nvgpu_nvhost_syncpt_is_expired_ext( 55 bool is_expired = nvgpu_nvhost_syncpt_is_expired_ext(
@@ -58,7 +58,7 @@ int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
58 if (is_expired) { 58 if (is_expired) {
59 if (preallocated) { 59 if (preallocated) {
60 nvgpu_memset(c->g, wait_cmd->mem, 60 nvgpu_memset(c->g, wait_cmd->mem,
61 (wait_cmd->off + (u32)pos * wait_cmd_size) * (u32)sizeof(u32), 61 (wait_cmd->off + pos * wait_cmd_size) * (u32)sizeof(u32),
62 0, wait_cmd_size * (u32)sizeof(u32)); 62 0, wait_cmd_size * (u32)sizeof(u32));
63 } 63 }
64 } else { 64 } else {
@@ -73,7 +73,7 @@ int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
73 nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx", 73 nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
74 id, c->vm->syncpt_ro_map_gpu_va); 74 id, c->vm->syncpt_ro_map_gpu_va);
75 c->g->ops.fifo.add_syncpt_wait_cmd(c->g, wait_cmd, 75 c->g->ops.fifo.add_syncpt_wait_cmd(c->g, wait_cmd,
76 (u32)pos * wait_cmd_size, id, thresh, 76 pos * wait_cmd_size, id, thresh,
77 c->vm->syncpt_ro_map_gpu_va); 77 c->vm->syncpt_ro_map_gpu_va);
78 } 78 }
79 79
@@ -257,7 +257,7 @@ static void channel_sync_syncpt_set_safe_state(struct nvgpu_channel_sync *s)
257 nvgpu_nvhost_syncpt_set_safe_state(sp->nvhost_dev, sp->id); 257 nvgpu_nvhost_syncpt_set_safe_state(sp->nvhost_dev, sp->id);
258} 258}
259 259
260static int syncpt_get_id(struct nvgpu_channel_sync *s) 260static int channel_sync_syncpt_get_id(struct nvgpu_channel_sync *s)
261{ 261{
262 struct nvgpu_channel_sync_syncpt *sp = 262 struct nvgpu_channel_sync_syncpt *sp =
263 container_of(s, struct nvgpu_channel_sync_syncpt, ops); 263 container_of(s, struct nvgpu_channel_sync_syncpt, ops);
@@ -329,7 +329,7 @@ channel_sync_syncpt_create(struct channel_gk20a *c, bool user_managed)
329 sp->ops.incr_user = channel_sync_syncpt_incr_user; 329 sp->ops.incr_user = channel_sync_syncpt_incr_user;
330 sp->ops.set_min_eq_max = channel_sync_syncpt_set_min_eq_max; 330 sp->ops.set_min_eq_max = channel_sync_syncpt_set_min_eq_max;
331 sp->ops.set_safe_state = channel_sync_syncpt_set_safe_state; 331 sp->ops.set_safe_state = channel_sync_syncpt_set_safe_state;
332 sp->ops.syncpt_id = syncpt_get_id; 332 sp->ops.syncpt_id = channel_sync_syncpt_get_id;
333 sp->ops.syncpt_address = channel_sync_syncpt_get_address; 333 sp->ops.syncpt_address = channel_sync_syncpt_get_address;
334 sp->ops.destroy = channel_sync_syncpt_destroy; 334 sp->ops.destroy = channel_sync_syncpt_destroy;
335 335
@@ -390,17 +390,17 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
390 390
391void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c, 391void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c,
392 struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd, 392 struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
393 u32 wait_cmd_size, int pos) 393 u32 wait_cmd_size, u32 pos)
394{ 394{
395 if (sema == NULL) { 395 if (sema == NULL) {
396 /* expired */ 396 /* expired */
397 nvgpu_memset(c->g, wait_cmd->mem, 397 nvgpu_memset(c->g, wait_cmd->mem,
398 (wait_cmd->off + (u32)pos * wait_cmd_size) * (u32)sizeof(u32), 398 (wait_cmd->off + pos * wait_cmd_size) * (u32)sizeof(u32),
399 0, wait_cmd_size * (u32)sizeof(u32)); 399 0, wait_cmd_size * (u32)sizeof(u32));
400 } else { 400 } else {
401 WARN_ON(!sema->incremented); 401 WARN_ON(!sema->incremented);
402 add_sema_cmd(c->g, c, sema, wait_cmd, 402 add_sema_cmd(c->g, c, sema, wait_cmd,
403 (u32)pos * wait_cmd_size, true, false); 403 pos * wait_cmd_size, true, false);
404 nvgpu_semaphore_put(sema); 404 nvgpu_semaphore_put(sema);
405 } 405 }
406} 406}
diff --git a/drivers/gpu/nvgpu/include/nvgpu/channel_sync.h b/drivers/gpu/nvgpu/include/nvgpu/channel_sync.h
index b5936edc..f0b2b860 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/channel_sync.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/channel_sync.h
@@ -98,11 +98,11 @@ struct nvgpu_channel_sync {
98 98
99void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c, 99void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c,
100 struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd, 100 struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
101 u32 wait_cmd_size, int pos); 101 u32 wait_cmd_size, u32 pos);
102 102
103int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c, 103int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
104 u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd, 104 u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd,
105 u32 wait_cmd_size, int pos, bool preallocated); 105 u32 wait_cmd_size, u32 pos, bool preallocated);
106 106
107void nvgpu_channel_sync_destroy(struct nvgpu_channel_sync *sync, 107void nvgpu_channel_sync_destroy(struct nvgpu_channel_sync *sync,
108 bool set_safe_state); 108 bool set_safe_state);
@@ -110,4 +110,4 @@ struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct channel_gk20a *c,
110 bool user_managed); 110 bool user_managed);
111bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g); 111bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g);
112 112
113#endif /* NVGPU_GK20A_CHANNEL_SYNC_GK20A_H */ 113#endif /* NVGPU_CHANNEL_SYNC_H */