author    Konsta Holtta <kholtta@nvidia.com>  2018-02-06 07:36:47 -0500
committer mobile promotions <svcmobile_promotions@nvidia.com>  2018-02-14 07:52:47 -0500
commit    1582bdb5eeff44a93f53987080a652910d51c3c4 (patch)
tree      85e55ae9c929dbe1acdd72ee84ba318a421d15f5  /drivers/gpu/nvgpu/common/semaphore.c
parent    eb03270ff65b0fa52d080c2e8700acdf42b9ddee (diff)
gpu: nvgpu: delete nvgpu_semaphore_int list
The hw semas in a sema pool are stored in a list. All elements in this
list are freed in a loop when a semaphore pool is destroyed. However,
each hw sema is always owned by a channel, and each such channel frees
its hw sema during channel closure before putting a ref to the VM,
which holds a ref to the sema pool. The lifetime of every hw sema is
therefore shorter than that of the pool, and this list is always empty
when the pool is freed. Delete the list and the freeing loop. Also
delete the nr_incrs member in nvgpu_semaphore_int, which is never
accessed.

Jira NVGPU-512

Change-Id: Ie072029f9e7cc749141e9f02ef45fdf64358ad96
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1653540
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
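The teardown ordering this commit relies on can be illustrated with a small standalone sketch. This is not nvgpu code; the types and functions below (pool, channel, pool_put, channel_close) are simplified stand-ins that only model the ordering described above: a channel frees its hw sema before dropping the reference chain that keeps the pool alive, so the pool's hw_semas list can never be non-empty when the pool is freed.

/* Simplified model of the hw sema / pool lifetime ordering (not nvgpu code). */
#include <stdio.h>
#include <stdlib.h>

struct pool {
	int refs;		/* models the nvgpu_ref on the sema pool */
	int live_hw_semas;	/* hw semas still pointing at this pool */
};

struct channel {
	struct pool *pool;	/* held indirectly via the VM in nvgpu */
	int has_hw_sema;
};

static void pool_put(struct pool *p)
{
	if (--p->refs == 0) {
		/* Mirrors the pool-free path: nothing left to walk. */
		printf("pool freed; live hw semas = %d\n", p->live_hw_semas);
		free(p);
	}
}

static void channel_close(struct channel *ch)
{
	/* Step 1: free the channel's hw sema. */
	if (ch->has_hw_sema) {
		ch->has_hw_sema = 0;
		ch->pool->live_hw_semas--;
	}
	/* Step 2: only now drop the ref that keeps the pool alive. */
	pool_put(ch->pool);
}

int main(void)
{
	struct pool *p = calloc(1, sizeof(*p));
	struct channel ch = { .pool = p, .has_hw_sema = 1 };

	p->refs = 1;		/* the channel's (VM's) reference */
	p->live_hw_semas = 1;

	channel_close(&ch);	/* prints "pool freed; live hw semas = 0" */
	return 0;
}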
Diffstat (limited to 'drivers/gpu/nvgpu/common/semaphore.c')
-rw-r--r--  drivers/gpu/nvgpu/common/semaphore.c  12
1 file changed, 1 insertion(+), 11 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 72b8a04c..e1e6c027 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -1,7 +1,7 @@
 /*
  * Nvgpu Semaphores
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -165,7 +165,6 @@ struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
 
 	p->page_idx = page_idx;
 	p->sema_sea = sea;
-	nvgpu_init_list_node(&p->hw_semas);
 	nvgpu_init_list_node(&p->pool_list_entry);
 	nvgpu_ref_init(&p->ref);
 
@@ -301,7 +300,6 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
 	struct nvgpu_semaphore_pool *p =
 		container_of(ref, struct nvgpu_semaphore_pool, ref);
 	struct nvgpu_semaphore_sea *s = p->sema_sea;
-	struct nvgpu_semaphore_int *hw_sema, *tmp;
 
 	/* Freeing a mapped pool is a bad idea. */
 	WARN_ON(p->mapped || p->gpu_va || p->gpu_va_ro);
@@ -312,10 +310,6 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
 	s->page_count--;
 	__unlock_sema_sea(s);
 
-	nvgpu_list_for_each_entry_safe(hw_sema, tmp, &p->hw_semas,
-				       nvgpu_semaphore_int, hw_sema_list)
-		nvgpu_kfree(p->sema_sea->gk20a, hw_sema);
-
 	nvgpu_mutex_destroy(&p->pool_lock);
 
 	gpu_sema_dbg(pool_to_gk20a(p),
@@ -376,11 +370,8 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 	hw_sema->idx = hw_sema_idx;
 	hw_sema->offset = SEMAPHORE_SIZE * hw_sema_idx;
 	nvgpu_atomic_set(&hw_sema->next_value, 0);
-	nvgpu_init_list_node(&hw_sema->hw_sema_list);
 	nvgpu_mem_wr(ch->g, &p->rw_mem, hw_sema->offset, 0);
 
-	nvgpu_list_add(&hw_sema->hw_sema_list, &p->hw_semas);
-
 	nvgpu_mutex_release(&p->pool_lock);
 
 	return 0;
@@ -406,7 +397,6 @@ void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch)
 	clear_bit(ch->hw_sema->idx, p->semas_alloced);
 
 	/* Make sure that when the ch is re-opened it will get a new HW sema. */
-	nvgpu_list_del(&ch->hw_sema->hw_sema_list);
 	nvgpu_kfree(ch->g, ch->hw_sema);
 	ch->hw_sema = NULL;
 