summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c191
1 files changed, 191 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
new file mode 100644
index 00000000..55fa0e32
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
@@ -0,0 +1,191 @@
1/*
2 * drivers/video/tegra/host/gk20a/semaphore_gk20a.c
3 *
4 * GK20A Semaphores
5 *
6 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 */
17
18#include "semaphore_gk20a.h"
19#include <linux/dma-mapping.h>
20#include <linux/slab.h>
21#include "gk20a.h"
22#include "mm_gk20a.h"
23
/* Bytes of backing memory reserved per semaphore slot in a pool. */
static const int SEMAPHORE_SIZE = 16;
25
26struct gk20a_semaphore_pool *gk20a_semaphore_pool_alloc(struct device *d,
27 const char *unique_name, size_t capacity)
28{
29 struct gk20a_semaphore_pool *p;
30 p = kzalloc(sizeof(*p), GFP_KERNEL);
31 if (!p)
32 return NULL;
33
34 kref_init(&p->ref);
35 INIT_LIST_HEAD(&p->maps);
36 mutex_init(&p->maps_mutex);
37 p->dev = d;
38
39 /* Alloc one 4k page of semaphore per channel. */
40 p->size = roundup(capacity * SEMAPHORE_SIZE, PAGE_SIZE);
41 p->cpu_va = dma_alloc_coherent(d, p->size, &p->iova, GFP_KERNEL);
42 if (!p->cpu_va)
43 goto clean_up;
44 if (gk20a_get_sgtable(d, &p->sgt, p->cpu_va, p->iova, p->size))
45 goto clean_up;
46
47 if (gk20a_allocator_init(&p->alloc, unique_name, 0,
48 p->size, SEMAPHORE_SIZE))
49 goto clean_up;
50
51 gk20a_dbg_info("cpuva=%p iova=%llx phys=%llx", p->cpu_va,
52 (u64)sg_dma_address(p->sgt->sgl), (u64)sg_phys(p->sgt->sgl));
53 return p;
54clean_up:
55 if (p->cpu_va)
56 dma_free_coherent(d, p->size, p->cpu_va, p->iova);
57 if (p->sgt)
58 gk20a_free_sgtable(&p->sgt);
59 kfree(p);
60 return NULL;
61}
62
63static void gk20a_semaphore_pool_free(struct kref *ref)
64{
65 struct gk20a_semaphore_pool *p =
66 container_of(ref, struct gk20a_semaphore_pool, ref);
67 mutex_lock(&p->maps_mutex);
68 WARN_ON(!list_empty(&p->maps));
69 mutex_unlock(&p->maps_mutex);
70 gk20a_free_sgtable(&p->sgt);
71 dma_free_coherent(p->dev, p->size, p->cpu_va, p->iova);
72 gk20a_allocator_destroy(&p->alloc);
73 kfree(p);
74}
75
76static void gk20a_semaphore_pool_get(struct gk20a_semaphore_pool *p)
77{
78 kref_get(&p->ref);
79}
80
81void gk20a_semaphore_pool_put(struct gk20a_semaphore_pool *p)
82{
83 kref_put(&p->ref, gk20a_semaphore_pool_free);
84}
85
86static struct gk20a_semaphore_pool_map *
87gk20a_semaphore_pool_find_map(struct gk20a_semaphore_pool *p,
88 struct vm_gk20a *vm)
89{
90 struct gk20a_semaphore_pool_map *map, *found = NULL;
91 mutex_lock(&p->maps_mutex);
92 list_for_each_entry(map, &p->maps, list) {
93 if (map->vm == vm) {
94 found = map;
95 break;
96 }
97 }
98 mutex_unlock(&p->maps_mutex);
99 return found;
100}
101
102int gk20a_semaphore_pool_map(struct gk20a_semaphore_pool *p,
103 struct vm_gk20a *vm,
104 enum gk20a_mem_rw_flag rw_flag)
105{
106 struct gk20a_semaphore_pool_map *map;
107
108 WARN_ON(gk20a_semaphore_pool_find_map(p, vm));
109 map = kzalloc(sizeof(*map), GFP_KERNEL);
110 if (!map)
111 return -ENOMEM;
112 map->vm = vm;
113 map->rw_flag = rw_flag;
114 map->gpu_va = gk20a_gmmu_map(vm, &p->sgt, p->size,
115 0/*uncached*/, rw_flag);
116 if (!map->gpu_va) {
117 kfree(map);
118 return -ENOMEM;
119 }
120 mutex_lock(&p->maps_mutex);
121 list_add(&map->list, &p->maps);
122 mutex_unlock(&p->maps_mutex);
123 return 0;
124}
125
126void gk20a_semaphore_pool_unmap(struct gk20a_semaphore_pool *p,
127 struct vm_gk20a *vm)
128{
129 struct gk20a_semaphore_pool_map *map =
130 gk20a_semaphore_pool_find_map(p, vm);
131 if (!map)
132 return;
133 gk20a_gmmu_unmap(vm, map->gpu_va, p->size, map->rw_flag);
134 list_del(&map->list);
135 kfree(map);
136}
137
138u64 gk20a_semaphore_pool_gpu_va(struct gk20a_semaphore_pool *p,
139 struct vm_gk20a *vm)
140{
141 struct gk20a_semaphore_pool_map *map =
142 gk20a_semaphore_pool_find_map(p, vm);
143 if (!map)
144 return 0;
145 return map->gpu_va;
146}
147
148struct gk20a_semaphore *gk20a_semaphore_alloc(struct gk20a_semaphore_pool *pool)
149{
150 struct gk20a_semaphore *s;
151
152 s = kzalloc(sizeof(*s), GFP_KERNEL);
153 if (!s)
154 return NULL;
155
156 if (pool->alloc.alloc(&pool->alloc, &s->offset, SEMAPHORE_SIZE)) {
157 gk20a_err(pool->dev, "failed to allocate semaphore");
158 kfree(s);
159 return NULL;
160 }
161
162 gk20a_semaphore_pool_get(pool);
163 s->pool = pool;
164
165 kref_init(&s->ref);
166 s->value = (volatile u32 *)((uintptr_t)pool->cpu_va + s->offset);
167 *s->value = 0; /* Initially acquired. */
168 gk20a_dbg_info("created semaphore offset=%d, value_cpu=%p, value=%d",
169 s->offset, s->value, *s->value);
170 return s;
171}
172
173static void gk20a_semaphore_free(struct kref *ref)
174{
175 struct gk20a_semaphore *s =
176 container_of(ref, struct gk20a_semaphore, ref);
177
178 s->pool->alloc.free(&s->pool->alloc, s->offset, SEMAPHORE_SIZE);
179 gk20a_semaphore_pool_put(s->pool);
180 kfree(s);
181}
182
183void gk20a_semaphore_put(struct gk20a_semaphore *s)
184{
185 kref_put(&s->ref, gk20a_semaphore_free);
186}
187
188void gk20a_semaphore_get(struct gk20a_semaphore *s)
189{
190 kref_get(&s->ref);
191}