author	Sai Nikhil <snikhil@nvidia.com>	2018-08-27 03:12:02 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-07 00:42:11 -0400
commit	2dd9bb03dd56ca86b0e61b89fab38d38a58ecddf (patch)
tree	b4de42f0056a65d1947c56f1f16afdcca29675a1
parent	7f8226887c28267d3c2351692d4429ead1e17695 (diff)
gpu: nvgpu: changing page_idx from int to u64
page_idx is an element of struct nvgpu_semaphore_pool, defined in
include/nvgpu/semaphore.h. Since page_idx cannot be negative, change it
from int to u64 and update the related code in the files that use it.
This also fixes MISRA 10.4 violations in these files.

Jira NVGPU-992

Change-Id: Ie9696dab7da9e139bc31563783b422c84144f18b
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1801632
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
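The substance of the patch is the type of the pool index and the format-specifier and cast updates that follow from it. Below is a minimal, self-contained C sketch of that reasoning; the struct and field names are illustrative stand-ins, not the real nvgpu definitions.

/* Illustrative only: mirrors the int -> u64 change to page_idx. */
#include <stdint.h>
#include <stdio.h>

struct pool {
	uint64_t page_idx;	/* previously int; an index is never negative */
};

int main(void)
{
	struct pool p = { .page_idx = 3U };

	/* Unsigned arithmetic on an unsigned index keeps both operands in
	 * the same essential type category, which is what MISRA C:2012
	 * Rule 10.4 requires. */
	uint64_t next = p.page_idx + 1U;

	/* The kernel code prints a u64 with %llu; in portable user-space C
	 * the casts below serve the same purpose. */
	printf("Allocated semaphore pool: page-idx=%llu (next=%llu)\n",
	       (unsigned long long)p.page_idx, (unsigned long long)next);
	return 0;
}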
-rw-r--r--	drivers/gpu/nvgpu/common/semaphore.c	24
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h	2
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/semaphore.h	8
-rw-r--r--	drivers/gpu/nvgpu/os/linux/nvgpu_mem.c	2
-rw-r--r--	drivers/gpu/nvgpu/os/linux/sync_sema_android.c	2
-rw-r--r--	drivers/gpu/nvgpu/os/posix/posix-nvgpu_mem.c	2
7 files changed, 22 insertions, 22 deletions
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 39852273..44321770 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -79,7 +79,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 	 * integer range. This way any buggy comparisons would start to fail
 	 * sooner rather than later.
 	 */
-	for (i = 0; i < PAGE_SIZE * SEMAPHORE_POOL_COUNT; i += 4) {
+	for (i = 0U; i < PAGE_SIZE * SEMAPHORE_POOL_COUNT; i += 4U) {
 		nvgpu_mem_wr(gk20a, &sea->sea_mem, i, 0xfffffff0);
 	}
 
@@ -192,7 +192,7 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
 	__unlock_sema_sea(sea);
 
 	gpu_sema_dbg(sea->gk20a,
-		     "Allocated semaphore pool: page-idx=%d", p->page_idx);
+		     "Allocated semaphore pool: page-idx=%llu", p->page_idx);
 
 	*pool = p;
 	return 0;
@@ -221,7 +221,7 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 	}
 
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     "Mapping semaphore pool! (idx=%d)", p->page_idx);
+		     "Mapping semaphore pool! (idx=%llu)", p->page_idx);
 
 	/*
 	 * Take the sea lock so that we don't race with a possible change to the
@@ -243,7 +243,7 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 	p->mapped = true;
 
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     " %d: GPU read-only VA = 0x%llx",
+		     " %llu: GPU read-only VA = 0x%llx",
 		     p->page_idx, p->gpu_va_ro);
 
 	/*
@@ -272,10 +272,10 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 	__unlock_sema_sea(p->sema_sea);
 
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     " %d: GPU read-write VA = 0x%llx",
+		     " %llu: GPU read-write VA = 0x%llx",
 		     p->page_idx, p->gpu_va);
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     " %d: CPU VA = 0x%p",
+		     " %llu: CPU VA = 0x%p",
 		     p->page_idx, p->rw_mem.cpu_va);
 
 	return 0;
@@ -285,7 +285,7 @@ fail_free_submem:
 fail_unmap:
 	nvgpu_gmmu_unmap(vm, &p->sema_sea->sea_mem, p->gpu_va_ro);
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     " %d: Failed to map semaphore pool!", p->page_idx);
+		     " %llu: Failed to map semaphore pool!", p->page_idx);
 fail_unlock:
 	__unlock_sema_sea(p->sema_sea);
 	return err;
@@ -310,7 +310,7 @@ void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p,
 	__unlock_sema_sea(p->sema_sea);
 
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     "Unmapped semaphore pool! (idx=%d)", p->page_idx);
+		     "Unmapped semaphore pool! (idx=%llu)", p->page_idx);
 }
 
 /*
@@ -330,14 +330,14 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
 
 	__lock_sema_sea(s);
 	nvgpu_list_del(&p->pool_list_entry);
-	clear_bit(p->page_idx, s->pools_alloced);
+	clear_bit((int)p->page_idx, s->pools_alloced);
 	s->page_count--;
 	__unlock_sema_sea(s);
 
 	nvgpu_mutex_destroy(&p->pool_lock);
 
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     "Freed semaphore pool! (idx=%d)", p->page_idx);
+		     "Freed semaphore pool! (idx=%llu)", p->page_idx);
 	nvgpu_kfree(p->sema_sea->gk20a, p);
 }
 
@@ -393,7 +393,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 	ch->hw_sema = hw_sema;
 	hw_sema->ch = ch;
 	hw_sema->location.pool = p;
-	hw_sema->location.offset = SEMAPHORE_SIZE * hw_sema_idx;
+	hw_sema->location.offset = SEMAPHORE_SIZE * (u32)hw_sema_idx;
 	current_value = nvgpu_mem_rd(ch->g, &p->rw_mem,
 				     hw_sema->location.offset);
 	nvgpu_atomic_set(&hw_sema->next_value, current_value);
@@ -590,7 +590,7 @@ bool nvgpu_semaphore_reset(struct nvgpu_semaphore_int *hw_sema)
 	 * more than what we expect to be the max.
 	 */
 
-	if (WARN_ON(__nvgpu_semaphore_value_released(threshold + 1,
+	if (WARN_ON(__nvgpu_semaphore_value_released(threshold + 1U,
 						     current_val)))
 		return false;
 
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index f78df0b5..d7399403 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -366,13 +366,13 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
 	g->ops.fifo.add_sema_cmd(g, s, va, cmd, off, acquire, wfi);
 
 	if (acquire) {
-		gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u pool=%-3d"
+		gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u pool=%-3llu"
 				     "va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
 				     ch, nvgpu_semaphore_get_value(s),
 				     s->location.pool->page_idx, va, cmd->gva,
 				     cmd->mem->gpu_va, ob);
 	} else {
-		gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) pool=%-3d"
+		gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) pool=%-3llu"
 				     "va=0x%llx cmd_mem=0x%llx b=0x%llx off=%u",
 				     ch, nvgpu_semaphore_get_value(s),
 				     nvgpu_semaphore_read(s),
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
index 32a7e388..2b8a5fd1 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
@@ -301,7 +301,7 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt);
  */
 int nvgpu_mem_create_from_mem(struct gk20a *g,
 			      struct nvgpu_mem *dest, struct nvgpu_mem *src,
-			      int start_page, int nr_pages);
+			      u64 start_page, int nr_pages);
 
 /*
  * Really free a vidmem buffer. There's a fair amount of work involved in
diff --git a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
index 85175069..3239551f 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
@@ -41,9 +41,9 @@
  * Max number of channels that can be used is 512. This of course needs to be
  * fixed to be dynamic but still fast.
  */
-#define SEMAPHORE_POOL_COUNT		512
-#define SEMAPHORE_SIZE			16
-#define SEMAPHORE_SEA_GROWTH_RATE	32
+#define SEMAPHORE_POOL_COUNT		512U
+#define SEMAPHORE_SIZE			16U
+#define SEMAPHORE_SEA_GROWTH_RATE	32U
 
 struct nvgpu_semaphore_sea;
 
@@ -84,7 +84,7 @@ struct nvgpu_semaphore_pool {
 	struct nvgpu_list_node pool_list_entry;	/* Node for list of pools. */
 	u64 gpu_va;				/* GPU access to the pool. */
 	u64 gpu_va_ro;				/* GPU access to the pool. */
-	int page_idx;				/* Index into sea bitmap. */
+	u64 page_idx;				/* Index into sea bitmap. */
 
 	DECLARE_BITMAP(semas_alloced, PAGE_SIZE / SEMAPHORE_SIZE);
 
diff --git a/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c
index 4fc95db9..c57e3fdf 100644
--- a/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c
@@ -135,7 +135,7 @@ u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem)
  */
 int nvgpu_mem_create_from_mem(struct gk20a *g,
 			      struct nvgpu_mem *dest, struct nvgpu_mem *src,
-			      int start_page, int nr_pages)
+			      u64 start_page, int nr_pages)
 {
 	int ret;
 	u64 start = start_page * PAGE_SIZE;
diff --git a/drivers/gpu/nvgpu/os/linux/sync_sema_android.c b/drivers/gpu/nvgpu/os/linux/sync_sema_android.c
index 50465d0c..59e3b7a6 100644
--- a/drivers/gpu/nvgpu/os/linux/sync_sema_android.c
+++ b/drivers/gpu/nvgpu/os/linux/sync_sema_android.c
@@ -284,7 +284,7 @@ static void gk20a_sync_pt_value_str_for_sema(struct gk20a_sync_pt *pt,
 {
 	struct nvgpu_semaphore *s = pt->sema;
 
-	snprintf(str, size, "S: pool=%d [v=%u,r_v=%u]",
+	snprintf(str, size, "S: pool=%llu [v=%u,r_v=%u]",
 		 s->location.pool->page_idx,
 		 nvgpu_semaphore_get_value(s),
 		 nvgpu_semaphore_read(s));
diff --git a/drivers/gpu/nvgpu/os/posix/posix-nvgpu_mem.c b/drivers/gpu/nvgpu/os/posix/posix-nvgpu_mem.c
index fa92a7c6..26770e47 100644
--- a/drivers/gpu/nvgpu/os/posix/posix-nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/os/posix/posix-nvgpu_mem.c
@@ -114,7 +114,7 @@ struct nvgpu_sgt *nvgpu_sgt_create_from_mem(struct gk20a *g,
 
 int nvgpu_mem_create_from_mem(struct gk20a *g,
 			      struct nvgpu_mem *dest, struct nvgpu_mem *src,
-			      int start_page, int nr_pages)
+			      u64 start_page, int nr_pages)
 {
 	u64 start = start_page * PAGE_SIZE;
 	u64 size = nr_pages * PAGE_SIZE;