author	Sai Nikhil <snikhil@nvidia.com>	2018-08-27 03:12:02 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-07 00:42:11 -0400
commit	2dd9bb03dd56ca86b0e61b89fab38d38a58ecddf (patch)
tree	b4de42f0056a65d1947c56f1f16afdcca29675a1 /drivers/gpu/nvgpu/common/semaphore.c
parent	7f8226887c28267d3c2351692d4429ead1e17695 (diff)
gpu: nvgpu: changing page_idx from int to u64
page_idx is a member of struct nvgpu_semaphore_pool, defined in
include/nvgpu/semaphore.h. Since page_idx can never be negative, change
its type from int to u64 and update the related code in various files.
This also fixes MISRA 10.4 violations in these files.

Jira NVGPU-992

Change-Id: Ie9696dab7da9e139bc31563783b422c84144f18b
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1801632
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
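For context, the type change described above lives in include/nvgpu/semaphore.h, which is outside this diffstat. A rough sketch of the affected struct follows; page_idx and the other member names are taken from this commit's diff, but the field ordering, the neighbouring types, and the comments are assumptions, not the actual header:

struct nvgpu_semaphore_pool {
	struct nvgpu_mem rw_mem;	/* assumed: RW backing memory for the pool */
	...
	/* Was "int page_idx;". The index can never be negative, so it is
	 * widened to u64; keeping it unsigned also avoids the signed/unsigned
	 * operand mixing flagged by MISRA C:2012 Rule 10.4. */
	u64 page_idx;			/* index of this pool's page in the sema sea */
	u64 gpu_va;			/* read-write GPU VA (see pool_map below) */
	u64 gpu_va_ro;			/* read-only GPU VA */
	bool mapped;
	...
};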
Diffstat (limited to 'drivers/gpu/nvgpu/common/semaphore.c')
-rw-r--r--	drivers/gpu/nvgpu/common/semaphore.c	| 24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 39852273..44321770 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -79,7 +79,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 	 * integer range. This way any buggy comparisons would start to fail
	 * sooner rather than later.
	 */
-	for (i = 0; i < PAGE_SIZE * SEMAPHORE_POOL_COUNT; i += 4) {
+	for (i = 0U; i < PAGE_SIZE * SEMAPHORE_POOL_COUNT; i += 4U) {
 		nvgpu_mem_wr(gk20a, &sea->sea_mem, i, 0xfffffff0);
 	}
 
@@ -192,7 +192,7 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
 	__unlock_sema_sea(sea);
 
 	gpu_sema_dbg(sea->gk20a,
-		     "Allocated semaphore pool: page-idx=%d", p->page_idx);
+		     "Allocated semaphore pool: page-idx=%llu", p->page_idx);
 
 	*pool = p;
 	return 0;
@@ -221,7 +221,7 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 	}
 
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     "Mapping semaphore pool! (idx=%d)", p->page_idx);
+		     "Mapping semaphore pool! (idx=%llu)", p->page_idx);
 
 	/*
	 * Take the sea lock so that we don't race with a possible change to the
@@ -243,7 +243,7 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 	p->mapped = true;
 
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     " %d: GPU read-only VA = 0x%llx",
+		     " %llu: GPU read-only VA = 0x%llx",
 		     p->page_idx, p->gpu_va_ro);
 
 	/*
@@ -272,10 +272,10 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 	__unlock_sema_sea(p->sema_sea);
 
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     " %d: GPU read-write VA = 0x%llx",
+		     " %llu: GPU read-write VA = 0x%llx",
 		     p->page_idx, p->gpu_va);
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     " %d: CPU VA = 0x%p",
+		     " %llu: CPU VA = 0x%p",
 		     p->page_idx, p->rw_mem.cpu_va);
 
 	return 0;
@@ -285,7 +285,7 @@ fail_free_submem:
 fail_unmap:
 	nvgpu_gmmu_unmap(vm, &p->sema_sea->sea_mem, p->gpu_va_ro);
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     " %d: Failed to map semaphore pool!", p->page_idx);
+		     " %llu: Failed to map semaphore pool!", p->page_idx);
 fail_unlock:
 	__unlock_sema_sea(p->sema_sea);
 	return err;
@@ -310,7 +310,7 @@ void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p,
 	__unlock_sema_sea(p->sema_sea);
 
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     "Unmapped semaphore pool! (idx=%d)", p->page_idx);
+		     "Unmapped semaphore pool! (idx=%llu)", p->page_idx);
 }
 
 /*
@@ -330,14 +330,14 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
 
 	__lock_sema_sea(s);
 	nvgpu_list_del(&p->pool_list_entry);
-	clear_bit(p->page_idx, s->pools_alloced);
+	clear_bit((int)p->page_idx, s->pools_alloced);
 	s->page_count--;
 	__unlock_sema_sea(s);
 
 	nvgpu_mutex_destroy(&p->pool_lock);
 
 	gpu_sema_dbg(pool_to_gk20a(p),
-		     "Freed semaphore pool! (idx=%d)", p->page_idx);
+		     "Freed semaphore pool! (idx=%llu)", p->page_idx);
 	nvgpu_kfree(p->sema_sea->gk20a, p);
 }
 
@@ -393,7 +393,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 	ch->hw_sema = hw_sema;
 	hw_sema->ch = ch;
 	hw_sema->location.pool = p;
-	hw_sema->location.offset = SEMAPHORE_SIZE * hw_sema_idx;
+	hw_sema->location.offset = SEMAPHORE_SIZE * (u32)hw_sema_idx;
 	current_value = nvgpu_mem_rd(ch->g, &p->rw_mem,
 				     hw_sema->location.offset);
 	nvgpu_atomic_set(&hw_sema->next_value, current_value);
@@ -590,7 +590,7 @@ bool nvgpu_semaphore_reset(struct nvgpu_semaphore_int *hw_sema)
 	 * more than what we expect to be the max.
	 */
 
-	if (WARN_ON(__nvgpu_semaphore_value_released(threshold + 1,
+	if (WARN_ON(__nvgpu_semaphore_value_released(threshold + 1U,
 						     current_val)))
		return false;
 
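The literal-suffix changes in the hunks above (0 -> 0U, 4 -> 4U, threshold + 1 -> threshold + 1U) follow from MISRA C:2012 Rule 10.4, which requires both operands of an arithmetic or comparison operator to have the same essential type category. A minimal, self-contained illustration of the pattern, using hypothetical names not taken from the driver:

#include <stdint.h>

static uint32_t next_value(uint32_t threshold)
{
	/* Non-compliant under MISRA 10.4: the literal 1 is essentially
	 * signed while threshold is essentially unsigned, so the two
	 * operands of '+' belong to different essential type categories. */
	/* return threshold + 1; */

	/* Compliant: the U suffix makes both operands unsigned. */
	return threshold + 1U;
}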