Diffstat (limited to 'drivers/gpu/nvgpu/common/semaphore.c')
-rw-r--r--  drivers/gpu/nvgpu/common/semaphore.c  12
1 file changed, 1 insertion(+), 11 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 72b8a04c..e1e6c027 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -1,7 +1,7 @@
 /*
  * Nvgpu Semaphores
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -165,7 +165,6 @@ struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
 
 	p->page_idx = page_idx;
 	p->sema_sea = sea;
-	nvgpu_init_list_node(&p->hw_semas);
 	nvgpu_init_list_node(&p->pool_list_entry);
 	nvgpu_ref_init(&p->ref);
 
@@ -301,7 +300,6 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
 	struct nvgpu_semaphore_pool *p =
 		container_of(ref, struct nvgpu_semaphore_pool, ref);
 	struct nvgpu_semaphore_sea *s = p->sema_sea;
-	struct nvgpu_semaphore_int *hw_sema, *tmp;
 
 	/* Freeing a mapped pool is a bad idea. */
 	WARN_ON(p->mapped || p->gpu_va || p->gpu_va_ro);
@@ -312,10 +310,6 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
 	s->page_count--;
 	__unlock_sema_sea(s);
 
-	nvgpu_list_for_each_entry_safe(hw_sema, tmp, &p->hw_semas,
-				       nvgpu_semaphore_int, hw_sema_list)
-		nvgpu_kfree(p->sema_sea->gk20a, hw_sema);
-
 	nvgpu_mutex_destroy(&p->pool_lock);
 
 	gpu_sema_dbg(pool_to_gk20a(p),
@@ -376,11 +370,8 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 	hw_sema->idx = hw_sema_idx;
 	hw_sema->offset = SEMAPHORE_SIZE * hw_sema_idx;
 	nvgpu_atomic_set(&hw_sema->next_value, 0);
-	nvgpu_init_list_node(&hw_sema->hw_sema_list);
 	nvgpu_mem_wr(ch->g, &p->rw_mem, hw_sema->offset, 0);
 
-	nvgpu_list_add(&hw_sema->hw_sema_list, &p->hw_semas);
-
 	nvgpu_mutex_release(&p->pool_lock);
 
 	return 0;
@@ -406,7 +397,6 @@ void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch)
 	clear_bit(ch->hw_sema->idx, p->semas_alloced);
 
 	/* Make sure that when the ch is re-opened it will get a new HW sema. */
-	nvgpu_list_del(&ch->hw_sema->hw_sema_list);
 	nvgpu_kfree(ch->g, ch->hw_sema);
 	ch->hw_sema = NULL;
 