Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c')
-rw-r--r--    drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c    44
1 file changed, 21 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
index cf855463..3b17bfcb 100644
--- a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
@@ -23,7 +23,7 @@
 
 static const int SEMAPHORE_SIZE = 16;
 
-struct gk20a_semaphore_pool *gk20a_semaphore_pool_alloc(struct device *d,
+struct gk20a_semaphore_pool *gk20a_semaphore_pool_alloc(struct gk20a *g,
 		const char *unique_name, size_t capacity)
 {
 	struct gk20a_semaphore_pool *p;
@@ -34,30 +34,27 @@ struct gk20a_semaphore_pool *gk20a_semaphore_pool_alloc(struct device *d,
 	kref_init(&p->ref);
 	INIT_LIST_HEAD(&p->maps);
 	mutex_init(&p->maps_mutex);
-	p->dev = d;
+	p->g = g;
 
 	/* Alloc one 4k page of semaphore per channel. */
-	p->size = roundup(capacity * SEMAPHORE_SIZE, PAGE_SIZE);
-	p->cpu_va = dma_alloc_coherent(d, p->size, &p->iova, GFP_KERNEL);
-	if (!p->cpu_va)
-		goto clean_up;
-	if (gk20a_get_sgtable(d, &p->sgt, p->cpu_va, p->iova, p->size))
+	if (gk20a_gmmu_alloc(g, roundup(capacity * SEMAPHORE_SIZE, PAGE_SIZE),
+			     &p->mem))
 		goto clean_up;
 
 	/* Sacrifice one semaphore in the name of returning error codes. */
 	if (gk20a_allocator_init(&p->alloc, unique_name,
-				 SEMAPHORE_SIZE, p->size - SEMAPHORE_SIZE,
+				 SEMAPHORE_SIZE, p->mem.size - SEMAPHORE_SIZE,
 				 SEMAPHORE_SIZE))
 		goto clean_up;
 
-	gk20a_dbg_info("cpuva=%p iova=%llx phys=%llx", p->cpu_va,
-		(u64)sg_dma_address(p->sgt->sgl), (u64)sg_phys(p->sgt->sgl));
+	gk20a_dbg_info("cpuva=%p iova=%llx phys=%llx", p->mem.cpu_va,
+		(u64)sg_dma_address(p->mem.sgt->sgl),
+		(u64)sg_phys(p->mem.sgt->sgl));
 	return p;
+
 clean_up:
-	if (p->cpu_va)
-		dma_free_coherent(d, p->size, p->cpu_va, p->iova);
-	if (p->sgt)
-		gk20a_free_sgtable(&p->sgt);
+	if (p->mem.size)
+		gk20a_gmmu_free(p->g, &p->mem);
 	kfree(p);
 	return NULL;
 }
@@ -69,8 +66,7 @@ static void gk20a_semaphore_pool_free(struct kref *ref)
 	mutex_lock(&p->maps_mutex);
 	WARN_ON(!list_empty(&p->maps));
 	mutex_unlock(&p->maps_mutex);
-	gk20a_free_sgtable(&p->sgt);
-	dma_free_coherent(p->dev, p->size, p->cpu_va, p->iova);
+	gk20a_gmmu_free(p->g, &p->mem);
 	gk20a_allocator_destroy(&p->alloc);
 	kfree(p);
 }
@@ -110,7 +106,7 @@ int gk20a_semaphore_pool_map(struct gk20a_semaphore_pool *p,
 		return -ENOMEM;
 	map->vm = vm;
 	map->rw_flag = rw_flag;
-	map->gpu_va = gk20a_gmmu_map(vm, &p->sgt, p->size,
+	map->gpu_va = gk20a_gmmu_map(vm, &p->mem.sgt, p->mem.size,
 				     0/*uncached*/, rw_flag,
 				     false);
 	if (!map->gpu_va) {
@@ -135,7 +131,7 @@ void gk20a_semaphore_pool_unmap(struct gk20a_semaphore_pool *p,
 	mutex_lock(&p->maps_mutex);
 	map = gk20a_semaphore_pool_find_map_locked(p, vm);
 	if (map) {
-		gk20a_gmmu_unmap(vm, map->gpu_va, p->size, map->rw_flag);
+		gk20a_gmmu_unmap(vm, map->gpu_va, p->mem.size, map->rw_flag);
 		gk20a_vm_put(vm);
 		list_del(&map->list);
 		kfree(map);
@@ -168,7 +164,8 @@ struct gk20a_semaphore *gk20a_semaphore_alloc(struct gk20a_semaphore_pool *pool)
 
 	s->offset = gk20a_balloc(&pool->alloc, SEMAPHORE_SIZE);
 	if (!s->offset) {
-		gk20a_err(pool->dev, "failed to allocate semaphore");
+		gk20a_err(dev_from_gk20a(pool->g),
+			  "failed to allocate semaphore");
 		kfree(s);
 		return NULL;
 	}
@@ -177,10 +174,11 @@ struct gk20a_semaphore *gk20a_semaphore_alloc(struct gk20a_semaphore_pool *pool)
 	s->pool = pool;
 
 	kref_init(&s->ref);
-	s->value = (volatile u32 *)((uintptr_t)pool->cpu_va + s->offset);
-	*s->value = 0; /* Initially acquired. */
-	gk20a_dbg_info("created semaphore offset=%d, value_cpu=%p, value=%d",
-		s->offset, s->value, *s->value);
+	/* Initially acquired. */
+	gk20a_mem_wr(s->pool->g, &s->pool->mem, s->offset, 0);
+	gk20a_dbg_info("created semaphore offset=%d, value=%d",
+		s->offset,
+		gk20a_mem_rd(s->pool->g, &s->pool->mem, s->offset));
 	return s;
 }
 
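The patch replaces the pool's hand-rolled DMA bookkeeping (separate cpu_va, iova, sgt and size fields fed by dma_alloc_coherent() and gk20a_get_sgtable(), each with its own free call) with a single mem_desc filled in by gk20a_gmmu_alloc(), and swaps the direct volatile CPU write of the semaphore value for the gk20a_mem_wr()/gk20a_mem_rd() accessors. Below is a minimal user-space sketch of why the descriptor shape simplifies the error paths, assuming nothing about nvgpu beyond what the hunks show; buf_desc, buf_alloc and buf_free are illustrative names, not part of the driver API.

/*
 * Sketch: bundle buffer state into one descriptor so a single free
 * helper, keyed on "size != 0", serves both the error path and the
 * normal teardown.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf_desc {
	void *cpu_va;	/* analogous to mem_desc.cpu_va */
	size_t size;	/* 0 means "nothing to free" */
};

static int buf_alloc(size_t size, struct buf_desc *mem)
{
	mem->cpu_va = calloc(1, size);
	if (!mem->cpu_va)
		return -1;
	mem->size = size;
	return 0;
}

static void buf_free(struct buf_desc *mem)
{
	free(mem->cpu_va);
	memset(mem, 0, sizeof(*mem));	/* size = 0: safe to "free" again */
}

int main(void)
{
	struct buf_desc mem = { 0 };

	if (buf_alloc(4096, &mem))
		goto clean_up;
	/* ... use mem.cpu_va ... */
	printf("allocated %zu bytes\n", mem.size);

clean_up:
	if (mem.size)	/* mirrors the new "if (p->mem.size)" check */
		buf_free(&mem);
	return 0;
}

Because the descriptor records its own size, the clean_up label keys the free on one field instead of testing cpu_va and sgt separately, and the same gk20a_gmmu_free() call serves both gk20a_semaphore_pool_alloc()'s error path and gk20a_semaphore_pool_free(). Routing the value update through gk20a_mem_wr()/gk20a_mem_rd() likewise keeps the semaphore code independent of how the backing memory is mapped on the CPU side.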