summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/common/semaphore.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/semaphore.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/semaphore.c | 30
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 5814a737..39852273 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -90,7 +90,7 @@ out:
90 90
91void nvgpu_semaphore_sea_destroy(struct gk20a *g) 91void nvgpu_semaphore_sea_destroy(struct gk20a *g)
92{ 92{
93 if (!g->sema_sea) { 93 if (g->sema_sea == NULL) {
94 return; 94 return;
95 } 95 }
96 96
@@ -111,7 +111,7 @@ struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
111 } 111 }
112 112
113 g->sema_sea = nvgpu_kzalloc(g, sizeof(*g->sema_sea)); 113 g->sema_sea = nvgpu_kzalloc(g, sizeof(*g->sema_sea));
114 if (!g->sema_sea) { 114 if (g->sema_sea == NULL) {
115 return NULL; 115 return NULL;
116 } 116 }
117 117
@@ -163,7 +163,7 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
163 int ret; 163 int ret;
164 164
165 p = nvgpu_kzalloc(sea->gk20a, sizeof(*p)); 165 p = nvgpu_kzalloc(sea->gk20a, sizeof(*p));
166 if (!p) { 166 if (p == NULL) {
167 return -ENOMEM; 167 return -ENOMEM;
168 } 168 }
169 169
@@ -234,13 +234,13 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
234 p->sema_sea->map_size, 234 p->sema_sea->map_size,
235 0, gk20a_mem_flag_read_only, 0, 235 0, gk20a_mem_flag_read_only, 0,
236 p->sema_sea->sea_mem.aperture); 236 p->sema_sea->sea_mem.aperture);
237 if (!addr) { 237 if (addr == 0ULL) {
238 err = -ENOMEM; 238 err = -ENOMEM;
239 goto fail_unlock; 239 goto fail_unlock;
240 } 240 }
241 241
242 p->gpu_va_ro = addr; 242 p->gpu_va_ro = addr;
243 p->mapped = 1; 243 p->mapped = true;
244 244
245 gpu_sema_dbg(pool_to_gk20a(p), 245 gpu_sema_dbg(pool_to_gk20a(p),
246 " %d: GPU read-only VA = 0x%llx", 246 " %d: GPU read-only VA = 0x%llx",
@@ -262,7 +262,7 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
262 gk20a_mem_flag_none, 0, 262 gk20a_mem_flag_none, 0,
263 p->rw_mem.aperture); 263 p->rw_mem.aperture);
264 264
265 if (!addr) { 265 if (addr == 0ULL) {
266 err = -ENOMEM; 266 err = -ENOMEM;
267 goto fail_free_submem; 267 goto fail_free_submem;
268 } 268 }
@@ -305,7 +305,7 @@ void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p,
305 305
306 p->gpu_va = 0; 306 p->gpu_va = 0;
307 p->gpu_va_ro = 0; 307 p->gpu_va_ro = 0;
308 p->mapped = 0; 308 p->mapped = false;
309 309
310 __unlock_sema_sea(p->sema_sea); 310 __unlock_sema_sea(p->sema_sea);
311 311
@@ -324,7 +324,9 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
324 struct nvgpu_semaphore_sea *s = p->sema_sea; 324 struct nvgpu_semaphore_sea *s = p->sema_sea;
325 325
326 /* Freeing a mapped pool is a bad idea. */ 326 /* Freeing a mapped pool is a bad idea. */
327 WARN_ON(p->mapped || p->gpu_va || p->gpu_va_ro); 327 WARN_ON((p->mapped) ||
328 (p->gpu_va != 0ULL) ||
329 (p->gpu_va_ro != 0ULL));
328 330
329 __lock_sema_sea(s); 331 __lock_sema_sea(s);
330 nvgpu_list_del(&p->pool_list_entry); 332 nvgpu_list_del(&p->pool_list_entry);
@@ -370,7 +372,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
370 struct nvgpu_semaphore_pool *p = ch->vm->sema_pool; 372 struct nvgpu_semaphore_pool *p = ch->vm->sema_pool;
371 int current_value; 373 int current_value;
372 374
373 BUG_ON(!p); 375 BUG_ON(p == NULL);
374 376
375 nvgpu_mutex_acquire(&p->pool_lock); 377 nvgpu_mutex_acquire(&p->pool_lock);
376 378
@@ -383,7 +385,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
383 } 385 }
384 386
385 hw_sema = nvgpu_kzalloc(ch->g, sizeof(struct nvgpu_semaphore_int)); 387 hw_sema = nvgpu_kzalloc(ch->g, sizeof(struct nvgpu_semaphore_int));
386 if (!hw_sema) { 388 if (hw_sema == NULL) {
387 ret = -ENOMEM; 389 ret = -ENOMEM;
388 goto fail_free_idx; 390 goto fail_free_idx;
389 } 391 }
@@ -416,7 +418,7 @@ void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch)
416 struct nvgpu_semaphore_int *hw_sema = ch->hw_sema; 418 struct nvgpu_semaphore_int *hw_sema = ch->hw_sema;
417 int idx = hw_sema->location.offset / SEMAPHORE_SIZE; 419 int idx = hw_sema->location.offset / SEMAPHORE_SIZE;
418 420
419 BUG_ON(!p); 421 BUG_ON(p == NULL);
420 422
421 nvgpu_mutex_acquire(&p->pool_lock); 423 nvgpu_mutex_acquire(&p->pool_lock);
422 424
@@ -439,7 +441,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
439 struct nvgpu_semaphore *s; 441 struct nvgpu_semaphore *s;
440 int ret; 442 int ret;
441 443
442 if (!ch->hw_sema) { 444 if (ch->hw_sema == NULL) {
443 ret = __nvgpu_init_hw_sema(ch); 445 ret = __nvgpu_init_hw_sema(ch);
444 if (ret) { 446 if (ret) {
445 return NULL; 447 return NULL;
@@ -447,7 +449,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
447 } 449 }
448 450
449 s = nvgpu_kzalloc(ch->g, sizeof(*s)); 451 s = nvgpu_kzalloc(ch->g, sizeof(*s));
450 if (!s) { 452 if (s == NULL) {
451 return NULL; 453 return NULL;
452 } 454 }
453 455
@@ -619,7 +621,7 @@ void nvgpu_semaphore_prepare(struct nvgpu_semaphore *s,
619 WARN_ON(s->incremented); 621 WARN_ON(s->incremented);
620 622
621 nvgpu_atomic_set(&s->value, next); 623 nvgpu_atomic_set(&s->value, next);
622 s->incremented = 1; 624 s->incremented = true;
623 625
624 gpu_sema_verbose_dbg(s->g, "INCR sema for c=%d (%u)", 626 gpu_sema_verbose_dbg(s->g, "INCR sema for c=%d (%u)",
625 hw_sema->ch->chid, next); 627 hw_sema->ch->chid, next);