-rw-r--r--	drivers/gpu/nvgpu/common/semaphore.c		12
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/semaphore.h	12
2 files changed, 2 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 72b8a04c..e1e6c027 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -1,7 +1,7 @@
 /*
  * Nvgpu Semaphores
  *
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -165,7 +165,6 @@ struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
 
 	p->page_idx = page_idx;
 	p->sema_sea = sea;
-	nvgpu_init_list_node(&p->hw_semas);
 	nvgpu_init_list_node(&p->pool_list_entry);
 	nvgpu_ref_init(&p->ref);
 
@@ -301,7 +300,6 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
 	struct nvgpu_semaphore_pool *p =
 		container_of(ref, struct nvgpu_semaphore_pool, ref);
 	struct nvgpu_semaphore_sea *s = p->sema_sea;
-	struct nvgpu_semaphore_int *hw_sema, *tmp;
 
 	/* Freeing a mapped pool is a bad idea. */
 	WARN_ON(p->mapped || p->gpu_va || p->gpu_va_ro);
@@ -312,10 +310,6 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
 	s->page_count--;
 	__unlock_sema_sea(s);
 
-	nvgpu_list_for_each_entry_safe(hw_sema, tmp, &p->hw_semas,
-				       nvgpu_semaphore_int, hw_sema_list)
-		nvgpu_kfree(p->sema_sea->gk20a, hw_sema);
-
 	nvgpu_mutex_destroy(&p->pool_lock);
 
 	gpu_sema_dbg(pool_to_gk20a(p),
@@ -376,11 +370,8 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 	hw_sema->idx = hw_sema_idx;
 	hw_sema->offset = SEMAPHORE_SIZE * hw_sema_idx;
 	nvgpu_atomic_set(&hw_sema->next_value, 0);
-	nvgpu_init_list_node(&hw_sema->hw_sema_list);
 	nvgpu_mem_wr(ch->g, &p->rw_mem, hw_sema->offset, 0);
 
-	nvgpu_list_add(&hw_sema->hw_sema_list, &p->hw_semas);
-
 	nvgpu_mutex_release(&p->pool_lock);
 
 	return 0;
@@ -406,7 +397,6 @@ void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch)
 	clear_bit(ch->hw_sema->idx, p->semas_alloced);
 
 	/* Make sure that when the ch is re-opened it will get a new HW sema. */
-	nvgpu_list_del(&ch->hw_sema->hw_sema_list);
 	nvgpu_kfree(ch->g, ch->hw_sema);
 	ch->hw_sema = NULL;
 
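The net effect in semaphore.c: a channel's hw_sema is no longer threaded onto a
per-pool list, so nvgpu_semaphore_pool_free() drops its cleanup walk and
nvgpu_semaphore_free_hw_sema() simply frees the object it owns; the
semas_alloced bitmap remains the only allocation record. The deleted walk used
the "safe" variant of the list iterator (with the second cursor, tmp) because
it frees nodes mid-traversal. Below is a minimal, self-contained userspace
sketch of that pattern; all names here (demo_list, demo_node, node_from_link)
are hypothetical stand-ins, not nvgpu API:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal singly-linked intrusive list; nvgpu_list_node is doubly
     * linked, but one pointer is enough to show the freeing pattern. */
    struct demo_list {
        struct demo_list *next;
    };

    /* Stand-in for nvgpu_semaphore_int with its embedded list node. */
    struct demo_node {
        int idx;
        struct demo_list link;
    };

    /* The offsetof back-cast that intrusive lists rely on. */
    #define node_from_link(p) \
        ((struct demo_node *)((uintptr_t)(p) - offsetof(struct demo_node, link)))

    int main(void)
    {
        struct demo_list head = { NULL };

        /* Build a three-entry list by pushing at the head. */
        for (int i = 0; i < 3; i++) {
            struct demo_node *n = malloc(sizeof(*n));
            if (!n)
                return 1;
            n->idx = i;
            n->link.next = head.next;
            head.next = &n->link;
        }

        /* "Safe" traversal: latch the successor before freeing the
         * current node, which is exactly why the deleted loop needed
         * the _safe iterator and a second cursor. */
        struct demo_list *cur = head.next;
        while (cur) {
            struct demo_list *next = cur->next;
            struct demo_node *n = node_from_link(cur);
            printf("freeing node %d\n", n->idx);
            free(n);
            cur = next;
        }
        return 0;
    }

The lookahead pointer is the whole point: once free(n) runs, the current
node's next field is gone, so the successor has to be saved first.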
diff --git a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
index d36a3270..a4af1ca3 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -58,17 +58,8 @@ struct nvgpu_semaphore_int {
 	int idx;			/* Semaphore index. */
 	u32 offset;			/* Offset into the pool. */
 	nvgpu_atomic_t next_value;	/* Next available value. */
-	u32 nr_incrs;			/* Number of increments programmed. */
 	struct nvgpu_semaphore_pool *p;	/* Pool that owns this sema. */
 	struct channel_gk20a *ch;	/* Channel that owns this sema. */
-	struct nvgpu_list_node hw_sema_list; /* List of HW semaphores. */
-};
-
-static inline struct nvgpu_semaphore_int *
-nvgpu_semaphore_int_from_hw_sema_list(struct nvgpu_list_node *node)
-{
-	return (struct nvgpu_semaphore_int *)
-		((uintptr_t)node - offsetof(struct nvgpu_semaphore_int, hw_sema_list));
-};
 };
 
 /*
@@ -94,7 +85,6 @@ struct nvgpu_semaphore_pool {
 	u64 gpu_va_ro;		/* GPU access to the pool. */
 	int page_idx;		/* Index into sea bitmap. */
 
-	struct nvgpu_list_node hw_semas; /* List of HW semas. */
 	DECLARE_BITMAP(semas_alloced, PAGE_SIZE / SEMAPHORE_SIZE);
 
 	struct nvgpu_semaphore_sea *sema_sea; /* Sea that owns this pool. */
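The header side removes the last users of the embedded node: the hw_sema_list
member, the pool's hw_semas list head, and the
nvgpu_semaphore_int_from_hw_sema_list() helper. That helper was a hand-rolled
container_of(): it subtracted offsetof(struct nvgpu_semaphore_int,
hw_sema_list) from a node pointer to recover the enclosing semaphore. With no
list left to walk, nothing needs that back-mapping, so the helper goes away
with the member. A standalone sketch of the recovery, using hypothetical
stand-in types (list_node, sema) rather than the nvgpu ones:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Simplified container_of(); the kernel's version layers extra
     * type checking on top of the same offsetof arithmetic. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Hypothetical stand-ins for nvgpu_list_node / nvgpu_semaphore_int. */
    struct list_node {
        struct list_node *prev, *next;
    };

    struct sema {
        int idx;
        struct list_node entry;   /* plays the role of hw_sema_list */
    };

    int main(void)
    {
        struct sema s = { .idx = 7, .entry = { NULL, NULL } };

        /* A list walk only ever hands you the embedded node... */
        struct list_node *node = &s.entry;

        /* ...so subtracting the member's offset recovers the container,
         * which is what the removed inline helper did. */
        struct sema *owner = container_of(node, struct sema, entry);

        assert(owner == &s);
        printf("recovered sema, idx = %d\n", owner->idx);
        return 0;
    }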